1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7 #include <linux/stddef.h> 8 #include <linux/pci.h> 9 #include <linux/kernel.h> 10 #include <linux/slab.h> 11 #include <linux/delay.h> 12 #include <asm/byteorder.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/string.h> 15 #include <linux/module.h> 16 #include <linux/interrupt.h> 17 #include <linux/workqueue.h> 18 #include <linux/ethtool.h> 19 #include <linux/etherdevice.h> 20 #include <linux/vmalloc.h> 21 #include <linux/crash_dump.h> 22 #include <linux/crc32.h> 23 #include <linux/qed/qed_if.h> 24 #include <linux/qed/qed_ll2_if.h> 25 #include <net/devlink.h> 26 #include <linux/aer.h> 27 #include <linux/phylink.h> 28 29 #include "qed.h" 30 #include "qed_sriov.h" 31 #include "qed_sp.h" 32 #include "qed_dev_api.h" 33 #include "qed_ll2.h" 34 #include "qed_fcoe.h" 35 #include "qed_iscsi.h" 36 37 #include "qed_mcp.h" 38 #include "qed_reg_addr.h" 39 #include "qed_hw.h" 40 #include "qed_selftest.h" 41 #include "qed_debug.h" 42 #include "qed_devlink.h" 43 44 #define QED_ROCE_QPS (8192) 45 #define QED_ROCE_DPIS (8) 46 #define QED_RDMA_SRQS QED_ROCE_QPS 47 #define QED_NVM_CFG_GET_FLAGS 0xA 48 #define QED_NVM_CFG_GET_PF_FLAGS 0x1A 49 #define QED_NVM_CFG_MAX_ATTRS 50 50 51 static char version[] = 52 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 53 54 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); 55 MODULE_LICENSE("GPL"); 56 MODULE_VERSION(DRV_MODULE_VERSION); 57 58 #define FW_FILE_VERSION \ 59 __stringify(FW_MAJOR_VERSION) "." \ 60 __stringify(FW_MINOR_VERSION) "." \ 61 __stringify(FW_REVISION_VERSION) "." \ 62 __stringify(FW_ENGINEERING_VERSION) 63 64 #define QED_FW_FILE_NAME \ 65 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" 66 67 MODULE_FIRMWARE(QED_FW_FILE_NAME); 68 69 /* MFW speed capabilities maps */ 70 71 struct qed_mfw_speed_map { 72 u32 mfw_val; 73 __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); 74 75 const u32 *cap_arr; 76 u32 arr_size; 77 }; 78 79 #define QED_MFW_SPEED_MAP(type, arr) \ 80 { \ 81 .mfw_val = (type), \ 82 .cap_arr = (arr), \ 83 .arr_size = ARRAY_SIZE(arr), \ 84 } 85 86 static const u32 qed_mfw_ext_1g[] __initconst = { 87 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 88 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 89 ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 90 }; 91 92 static const u32 qed_mfw_ext_10g[] __initconst = { 93 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 94 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 95 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 96 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 97 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 98 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 99 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 100 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, 101 }; 102 103 static const u32 qed_mfw_ext_20g[] __initconst = { 104 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 105 }; 106 107 static const u32 qed_mfw_ext_25g[] __initconst = { 108 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 109 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 110 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 111 }; 112 113 static const u32 qed_mfw_ext_40g[] __initconst = { 114 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 115 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 116 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 117 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 118 }; 119 120 static const u32 qed_mfw_ext_50g_base_r[] __initconst = { 121 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 122 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 123 
ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 124 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 125 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 126 }; 127 128 static const u32 qed_mfw_ext_50g_base_r2[] __initconst = { 129 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 130 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 131 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 132 }; 133 134 static const u32 qed_mfw_ext_100g_base_r2[] __initconst = { 135 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 136 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 137 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 138 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 139 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 140 }; 141 142 static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { 143 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 144 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 145 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 146 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 147 }; 148 149 static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { 150 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), 151 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), 152 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g), 153 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), 154 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), 155 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, 156 qed_mfw_ext_50g_base_r), 157 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2, 158 qed_mfw_ext_50g_base_r2), 159 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2, 160 qed_mfw_ext_100g_base_r2), 161 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4, 162 qed_mfw_ext_100g_base_r4), 163 }; 164 165 static const u32 qed_mfw_legacy_1g[] __initconst = { 166 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 167 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 168 ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 169 }; 170 171 static const u32 qed_mfw_legacy_10g[] __initconst = { 172 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 173 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 174 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 175 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 176 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 177 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 178 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 179 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, 180 }; 181 182 static const u32 qed_mfw_legacy_20g[] __initconst = { 183 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 184 }; 185 186 static const u32 qed_mfw_legacy_25g[] __initconst = { 187 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 188 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 189 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 190 }; 191 192 static const u32 qed_mfw_legacy_40g[] __initconst = { 193 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 194 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 195 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 196 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 197 }; 198 199 static const u32 qed_mfw_legacy_50g[] __initconst = { 200 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 201 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 202 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 203 }; 204 205 static const u32 qed_mfw_legacy_bb_100g[] __initconst = { 206 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 207 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 208 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 209 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 210 }; 211 212 static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = { 213 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G, 214 qed_mfw_legacy_1g), 215 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G, 216 
qed_mfw_legacy_10g), 217 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G, 218 qed_mfw_legacy_20g), 219 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G, 220 qed_mfw_legacy_25g), 221 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G, 222 qed_mfw_legacy_40g), 223 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G, 224 qed_mfw_legacy_50g), 225 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G, 226 qed_mfw_legacy_bb_100g), 227 }; 228 229 static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map) 230 { 231 linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); 232 233 map->cap_arr = NULL; 234 map->arr_size = 0; 235 } 236 237 static void __init qed_mfw_speed_maps_init(void) 238 { 239 u32 i; 240 241 for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) 242 qed_mfw_speed_map_populate(qed_mfw_ext_maps + i); 243 244 for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) 245 qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i); 246 } 247 248 static int __init qed_init(void) 249 { 250 pr_info("%s", version); 251 252 qed_mfw_speed_maps_init(); 253 254 return 0; 255 } 256 module_init(qed_init); 257 258 static void __exit qed_exit(void) 259 { 260 /* To prevent marking this module as "permanent" */ 261 } 262 module_exit(qed_exit); 263 264 /* Check if the DMA controller on the machine can properly handle the DMA 265 * addressing required by the device. 266 */ 267 static int qed_set_coherency_mask(struct qed_dev *cdev) 268 { 269 struct device *dev = &cdev->pdev->dev; 270 271 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 272 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 273 DP_NOTICE(cdev, 274 "Can't request 64-bit consistent allocations\n"); 275 return -EIO; 276 } 277 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 278 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); 279 return -EIO; 280 } 281 282 return 0; 283 } 284 285 static void qed_free_pci(struct qed_dev *cdev) 286 { 287 struct pci_dev *pdev = cdev->pdev; 288 289 pci_disable_pcie_error_reporting(pdev); 290 291 if (cdev->doorbells && cdev->db_size) 292 iounmap(cdev->doorbells); 293 if (cdev->regview) 294 iounmap(cdev->regview); 295 if (atomic_read(&pdev->enable_cnt) == 1) 296 pci_release_regions(pdev); 297 298 pci_disable_device(pdev); 299 } 300 301 #define PCI_REVISION_ID_ERROR_VAL 0xff 302 303 /* Performs PCI initializations as well as initializing PCI-related parameters 304 * in the device structrue. Returns 0 in case of success. 305 */ 306 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) 307 { 308 u8 rev_id; 309 int rc; 310 311 cdev->pdev = pdev; 312 313 rc = pci_enable_device(pdev); 314 if (rc) { 315 DP_NOTICE(cdev, "Cannot enable PCI device\n"); 316 goto err0; 317 } 318 319 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 320 DP_NOTICE(cdev, "No memory region found in bar #0\n"); 321 rc = -EIO; 322 goto err1; 323 } 324 325 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 326 DP_NOTICE(cdev, "No memory region found in bar #2\n"); 327 rc = -EIO; 328 goto err1; 329 } 330 331 if (atomic_read(&pdev->enable_cnt) == 1) { 332 rc = pci_request_regions(pdev, "qed"); 333 if (rc) { 334 DP_NOTICE(cdev, 335 "Failed to request PCI memory resources\n"); 336 goto err1; 337 } 338 pci_set_master(pdev); 339 pci_save_state(pdev); 340 } 341 342 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); 343 if (rev_id == PCI_REVISION_ID_ERROR_VAL) { 344 DP_NOTICE(cdev, 345 "Detected PCI device error [rev_id 0x%x]. 
Probably due to prior indication. Aborting.\n", 346 rev_id); 347 rc = -ENODEV; 348 goto err2; 349 } 350 if (!pci_is_pcie(pdev)) { 351 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 352 rc = -EIO; 353 goto err2; 354 } 355 356 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 357 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 358 DP_NOTICE(cdev, "Cannot find power management capability\n"); 359 360 rc = qed_set_coherency_mask(cdev); 361 if (rc) 362 goto err2; 363 364 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 365 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 366 cdev->pci_params.irq = pdev->irq; 367 368 cdev->regview = pci_ioremap_bar(pdev, 0); 369 if (!cdev->regview) { 370 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 371 rc = -ENOMEM; 372 goto err2; 373 } 374 375 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 376 cdev->db_size = pci_resource_len(cdev->pdev, 2); 377 if (!cdev->db_size) { 378 if (IS_PF(cdev)) { 379 DP_NOTICE(cdev, "No Doorbell bar available\n"); 380 return -EINVAL; 381 } else { 382 return 0; 383 } 384 } 385 386 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 387 388 if (!cdev->doorbells) { 389 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 390 return -ENOMEM; 391 } 392 393 /* AER (Advanced Error reporting) configuration */ 394 rc = pci_enable_pcie_error_reporting(pdev); 395 if (rc) 396 DP_VERBOSE(cdev, NETIF_MSG_DRV, 397 "Failed to configure PCIe AER [%d]\n", rc); 398 399 return 0; 400 401 err2: 402 pci_release_regions(pdev); 403 err1: 404 pci_disable_device(pdev); 405 err0: 406 return rc; 407 } 408 409 int qed_fill_dev_info(struct qed_dev *cdev, 410 struct qed_dev_info *dev_info) 411 { 412 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 413 struct qed_hw_info *hw_info = &p_hwfn->hw_info; 414 struct qed_tunnel_info *tun = &cdev->tunnel; 415 struct qed_ptt *ptt; 416 417 memset(dev_info, 0, sizeof(struct qed_dev_info)); 418 419 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 420 tun->vxlan.b_mode_enabled) 421 dev_info->vxlan_enable = true; 422 423 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 424 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 425 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 426 dev_info->gre_enable = true; 427 428 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 429 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 430 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 431 dev_info->geneve_enable = true; 432 433 dev_info->num_hwfns = cdev->num_hwfns; 434 dev_info->pci_mem_start = cdev->pci_params.mem_start; 435 dev_info->pci_mem_end = cdev->pci_params.mem_end; 436 dev_info->pci_irq = cdev->pci_params.irq; 437 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 438 dev_info->dev_type = cdev->type; 439 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 440 441 if (IS_PF(cdev)) { 442 dev_info->fw_major = FW_MAJOR_VERSION; 443 dev_info->fw_minor = FW_MINOR_VERSION; 444 dev_info->fw_rev = FW_REVISION_VERSION; 445 dev_info->fw_eng = FW_ENGINEERING_VERSION; 446 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 447 &cdev->mf_bits); 448 if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) 449 dev_info->b_arfs_capable = true; 450 dev_info->tx_switching = true; 451 452 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) 453 dev_info->wol_support = true; 454 455 dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); 456 457 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; 458 } else { 459 
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, 460 &dev_info->fw_minor, &dev_info->fw_rev, 461 &dev_info->fw_eng); 462 } 463 464 if (IS_PF(cdev)) { 465 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 466 if (ptt) { 467 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 468 &dev_info->mfw_rev, NULL); 469 470 qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 471 &dev_info->mbi_version); 472 473 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 474 &dev_info->flash_size); 475 476 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 477 } 478 } else { 479 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 480 &dev_info->mfw_rev, NULL); 481 } 482 483 dev_info->mtu = hw_info->mtu; 484 cdev->common_dev_info = *dev_info; 485 486 return 0; 487 } 488 489 static void qed_free_cdev(struct qed_dev *cdev) 490 { 491 kfree((void *)cdev); 492 } 493 494 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 495 { 496 struct qed_dev *cdev; 497 498 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 499 if (!cdev) 500 return cdev; 501 502 qed_init_struct(cdev); 503 504 return cdev; 505 } 506 507 /* Sets the requested power state */ 508 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 509 { 510 if (!cdev) 511 return -ENODEV; 512 513 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 514 return 0; 515 } 516 517 /* probing */ 518 static struct qed_dev *qed_probe(struct pci_dev *pdev, 519 struct qed_probe_params *params) 520 { 521 struct qed_dev *cdev; 522 int rc; 523 524 cdev = qed_alloc_cdev(pdev); 525 if (!cdev) 526 goto err0; 527 528 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 529 cdev->protocol = params->protocol; 530 531 if (params->is_vf) 532 cdev->b_is_vf = true; 533 534 qed_init_dp(cdev, params->dp_module, params->dp_level); 535 536 cdev->recov_in_prog = params->recov_in_prog; 537 538 rc = qed_init_pci(cdev, pdev); 539 if (rc) { 540 DP_ERR(cdev, "init pci failed\n"); 541 goto err1; 542 } 543 DP_INFO(cdev, "PCI init completed successfully\n"); 544 545 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 546 if (rc) { 547 DP_ERR(cdev, "hw prepare failed\n"); 548 goto err2; 549 } 550 551 DP_INFO(cdev, "qed_probe completed successfully\n"); 552 553 return cdev; 554 555 err2: 556 qed_free_pci(cdev); 557 err1: 558 qed_free_cdev(cdev); 559 err0: 560 return NULL; 561 } 562 563 static void qed_remove(struct qed_dev *cdev) 564 { 565 if (!cdev) 566 return; 567 568 qed_hw_remove(cdev); 569 570 qed_free_pci(cdev); 571 572 qed_set_power_state(cdev, PCI_D3hot); 573 574 qed_free_cdev(cdev); 575 } 576 577 static void qed_disable_msix(struct qed_dev *cdev) 578 { 579 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 580 pci_disable_msix(cdev->pdev); 581 kfree(cdev->int_params.msix_table); 582 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 583 pci_disable_msi(cdev->pdev); 584 } 585 586 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 587 } 588 589 static int qed_enable_msix(struct qed_dev *cdev, 590 struct qed_int_params *int_params) 591 { 592 int i, rc, cnt; 593 594 cnt = int_params->in.num_vectors; 595 596 for (i = 0; i < cnt; i++) 597 int_params->msix_table[i].entry = i; 598 599 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 600 int_params->in.min_msix_cnt, cnt); 601 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 602 (rc % cdev->num_hwfns)) { 603 pci_disable_msix(cdev->pdev); 604 605 /* If fastpath is initialized, we need at least one interrupt 606 * per hwfn [and the slow path interrupts]. 
New requested number 607 * should be a multiple of the number of hwfns. 608 */ 609 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 610 DP_NOTICE(cdev, 611 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 612 cnt, int_params->in.num_vectors); 613 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 614 cnt); 615 if (!rc) 616 rc = cnt; 617 } 618 619 /* For VFs, we should return with an error in case we didn't get the 620 * exact number of msix vectors as we requested. 621 * Not doing that will lead to a crash when starting queues for 622 * this VF. 623 */ 624 if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { 625 /* MSI-x configuration was achieved */ 626 int_params->out.int_mode = QED_INT_MODE_MSIX; 627 int_params->out.num_vectors = rc; 628 rc = 0; 629 } else { 630 DP_NOTICE(cdev, 631 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", 632 cnt, rc); 633 } 634 635 return rc; 636 } 637 638 /* This function outputs the int mode and the number of enabled msix vector */ 639 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) 640 { 641 struct qed_int_params *int_params = &cdev->int_params; 642 struct msix_entry *tbl; 643 int rc = 0, cnt; 644 645 switch (int_params->in.int_mode) { 646 case QED_INT_MODE_MSIX: 647 /* Allocate MSIX table */ 648 cnt = int_params->in.num_vectors; 649 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); 650 if (!int_params->msix_table) { 651 rc = -ENOMEM; 652 goto out; 653 } 654 655 /* Enable MSIX */ 656 rc = qed_enable_msix(cdev, int_params); 657 if (!rc) 658 goto out; 659 660 DP_NOTICE(cdev, "Failed to enable MSI-X\n"); 661 kfree(int_params->msix_table); 662 if (force_mode) 663 goto out; 664 fallthrough; 665 666 case QED_INT_MODE_MSI: 667 if (cdev->num_hwfns == 1) { 668 rc = pci_enable_msi(cdev->pdev); 669 if (!rc) { 670 int_params->out.int_mode = QED_INT_MODE_MSI; 671 goto out; 672 } 673 674 DP_NOTICE(cdev, "Failed to enable MSI\n"); 675 if (force_mode) 676 goto out; 677 } 678 fallthrough; 679 680 case QED_INT_MODE_INTA: 681 int_params->out.int_mode = QED_INT_MODE_INTA; 682 rc = 0; 683 goto out; 684 default: 685 DP_NOTICE(cdev, "Unknown int_mode value %d\n", 686 int_params->in.int_mode); 687 rc = -EINVAL; 688 } 689 690 out: 691 if (!rc) 692 DP_INFO(cdev, "Using %s interrupts\n", 693 int_params->out.int_mode == QED_INT_MODE_INTA ? 694 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
695 "MSI" : "MSIX"); 696 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 697 698 return rc; 699 } 700 701 static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 702 int index, void(*handler)(void *)) 703 { 704 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 705 int relative_idx = index / cdev->num_hwfns; 706 707 hwfn->simd_proto_handler[relative_idx].func = handler; 708 hwfn->simd_proto_handler[relative_idx].token = token; 709 } 710 711 static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 712 { 713 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 714 int relative_idx = index / cdev->num_hwfns; 715 716 memset(&hwfn->simd_proto_handler[relative_idx], 0, 717 sizeof(struct qed_simd_fp_handler)); 718 } 719 720 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 721 { 722 tasklet_schedule((struct tasklet_struct *)tasklet); 723 return IRQ_HANDLED; 724 } 725 726 static irqreturn_t qed_single_int(int irq, void *dev_instance) 727 { 728 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 729 struct qed_hwfn *hwfn; 730 irqreturn_t rc = IRQ_NONE; 731 u64 status; 732 int i, j; 733 734 for (i = 0; i < cdev->num_hwfns; i++) { 735 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 736 737 if (!status) 738 continue; 739 740 hwfn = &cdev->hwfns[i]; 741 742 /* Slowpath interrupt */ 743 if (unlikely(status & 0x1)) { 744 tasklet_schedule(&hwfn->sp_dpc); 745 status &= ~0x1; 746 rc = IRQ_HANDLED; 747 } 748 749 /* Fastpath interrupts */ 750 for (j = 0; j < 64; j++) { 751 if ((0x2ULL << j) & status) { 752 struct qed_simd_fp_handler *p_handler = 753 &hwfn->simd_proto_handler[j]; 754 755 if (p_handler->func) 756 p_handler->func(p_handler->token); 757 else 758 DP_NOTICE(hwfn, 759 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", 760 j, status); 761 762 status &= ~(0x2ULL << j); 763 rc = IRQ_HANDLED; 764 } 765 } 766 767 if (unlikely(status)) 768 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 769 "got an unknown interrupt status 0x%llx\n", 770 status); 771 } 772 773 return rc; 774 } 775 776 int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 777 { 778 struct qed_dev *cdev = hwfn->cdev; 779 u32 int_mode; 780 int rc = 0; 781 u8 id; 782 783 int_mode = cdev->int_params.out.int_mode; 784 if (int_mode == QED_INT_MODE_MSIX) { 785 id = hwfn->my_id; 786 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 787 id, cdev->pdev->bus->number, 788 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 789 rc = request_irq(cdev->int_params.msix_table[id].vector, 790 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc); 791 } else { 792 unsigned long flags = 0; 793 794 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 795 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 796 PCI_FUNC(cdev->pdev->devfn)); 797 798 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 799 flags |= IRQF_SHARED; 800 801 rc = request_irq(cdev->pdev->irq, qed_single_int, 802 flags, cdev->name, cdev); 803 } 804 805 if (rc) 806 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 807 else 808 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 809 "Requested slowpath %s\n", 810 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); 811 812 return rc; 813 } 814 815 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) 816 { 817 /* Calling the disable function will make sure that any 818 * currently-running function is completed. The following call to the 819 * enable function makes this sequence a flush-like operation. 
820 */ 821 if (p_hwfn->b_sp_dpc_enabled) { 822 tasklet_disable(&p_hwfn->sp_dpc); 823 tasklet_enable(&p_hwfn->sp_dpc); 824 } 825 } 826 827 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 828 { 829 struct qed_dev *cdev = p_hwfn->cdev; 830 u8 id = p_hwfn->my_id; 831 u32 int_mode; 832 833 int_mode = cdev->int_params.out.int_mode; 834 if (int_mode == QED_INT_MODE_MSIX) 835 synchronize_irq(cdev->int_params.msix_table[id].vector); 836 else 837 synchronize_irq(cdev->pdev->irq); 838 839 qed_slowpath_tasklet_flush(p_hwfn); 840 } 841 842 static void qed_slowpath_irq_free(struct qed_dev *cdev) 843 { 844 int i; 845 846 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 847 for_each_hwfn(cdev, i) { 848 if (!cdev->hwfns[i].b_int_requested) 849 break; 850 synchronize_irq(cdev->int_params.msix_table[i].vector); 851 free_irq(cdev->int_params.msix_table[i].vector, 852 &cdev->hwfns[i].sp_dpc); 853 } 854 } else { 855 if (QED_LEADING_HWFN(cdev)->b_int_requested) 856 free_irq(cdev->pdev->irq, cdev); 857 } 858 qed_int_disable_post_isr_release(cdev); 859 } 860 861 static int qed_nic_stop(struct qed_dev *cdev) 862 { 863 int i, rc; 864 865 rc = qed_hw_stop(cdev); 866 867 for (i = 0; i < cdev->num_hwfns; i++) { 868 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 869 870 if (p_hwfn->b_sp_dpc_enabled) { 871 tasklet_disable(&p_hwfn->sp_dpc); 872 p_hwfn->b_sp_dpc_enabled = false; 873 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 874 "Disabled sp tasklet [hwfn %d] at %p\n", 875 i, &p_hwfn->sp_dpc); 876 } 877 } 878 879 qed_dbg_pf_exit(cdev); 880 881 return rc; 882 } 883 884 static int qed_nic_setup(struct qed_dev *cdev) 885 { 886 int rc, i; 887 888 /* Determine if interface is going to require LL2 */ 889 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 890 for (i = 0; i < cdev->num_hwfns; i++) { 891 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 892 893 p_hwfn->using_ll2 = true; 894 } 895 } 896 897 rc = qed_resc_alloc(cdev); 898 if (rc) 899 return rc; 900 901 DP_INFO(cdev, "Allocated qed resources\n"); 902 903 qed_resc_setup(cdev); 904 905 return rc; 906 } 907 908 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 909 { 910 int limit = 0; 911 912 /* Mark the fastpath as free/used */ 913 cdev->int_params.fp_initialized = cnt ? true : false; 914 915 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 916 limit = cdev->num_hwfns * 63; 917 else if (cdev->int_params.fp_msix_cnt) 918 limit = cdev->int_params.fp_msix_cnt; 919 920 if (!limit) 921 return -ENOMEM; 922 923 return min_t(int, cnt, limit); 924 } 925 926 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 927 { 928 memset(info, 0, sizeof(struct qed_int_info)); 929 930 if (!cdev->int_params.fp_initialized) { 931 DP_INFO(cdev, 932 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 933 return -EINVAL; 934 } 935 936 /* Need to expose only MSI-X information; Single IRQ is handled solely 937 * by qed. 
938 */ 939 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 940 int msix_base = cdev->int_params.fp_msix_base; 941 942 info->msix_cnt = cdev->int_params.fp_msix_cnt; 943 info->msix = &cdev->int_params.msix_table[msix_base]; 944 } 945 946 return 0; 947 } 948 949 static int qed_slowpath_setup_int(struct qed_dev *cdev, 950 enum qed_int_mode int_mode) 951 { 952 struct qed_sb_cnt_info sb_cnt_info; 953 int num_l2_queues = 0; 954 int rc; 955 int i; 956 957 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 958 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 959 return -EINVAL; 960 } 961 962 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 963 cdev->int_params.in.int_mode = int_mode; 964 for_each_hwfn(cdev, i) { 965 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 966 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 967 cdev->int_params.in.num_vectors += sb_cnt_info.cnt; 968 cdev->int_params.in.num_vectors++; /* slowpath */ 969 } 970 971 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 972 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 973 974 if (is_kdump_kernel()) { 975 DP_INFO(cdev, 976 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 977 cdev->int_params.in.min_msix_cnt); 978 cdev->int_params.in.num_vectors = 979 cdev->int_params.in.min_msix_cnt; 980 } 981 982 rc = qed_set_int_mode(cdev, false); 983 if (rc) { 984 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 985 return rc; 986 } 987 988 cdev->int_params.fp_msix_base = cdev->num_hwfns; 989 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 990 cdev->num_hwfns; 991 992 if (!IS_ENABLED(CONFIG_QED_RDMA) || 993 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 994 return 0; 995 996 for_each_hwfn(cdev, i) 997 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 998 999 DP_VERBOSE(cdev, QED_MSG_RDMA, 1000 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 1001 cdev->int_params.fp_msix_cnt, num_l2_queues); 1002 1003 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 1004 cdev->int_params.rdma_msix_cnt = 1005 (cdev->int_params.fp_msix_cnt - num_l2_queues) 1006 / cdev->num_hwfns; 1007 cdev->int_params.rdma_msix_base = 1008 cdev->int_params.fp_msix_base + num_l2_queues; 1009 cdev->int_params.fp_msix_cnt = num_l2_queues; 1010 } else { 1011 cdev->int_params.rdma_msix_cnt = 0; 1012 } 1013 1014 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 1015 cdev->int_params.rdma_msix_cnt, 1016 cdev->int_params.rdma_msix_base); 1017 1018 return 0; 1019 } 1020 1021 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 1022 { 1023 int rc; 1024 1025 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 1026 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 1027 1028 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 1029 &cdev->int_params.in.num_vectors); 1030 if (cdev->num_hwfns > 1) { 1031 u8 vectors = 0; 1032 1033 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 1034 cdev->int_params.in.num_vectors += vectors; 1035 } 1036 1037 /* We want a minimum of one fastpath vector per vf hwfn */ 1038 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 1039 1040 rc = qed_set_int_mode(cdev, true); 1041 if (rc) 1042 return rc; 1043 1044 cdev->int_params.fp_msix_base = 0; 1045 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 1046 1047 return 0; 1048 } 1049 1050 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 1051 u8 *input_buf, u32 max_size, u8 *unzip_buf) 1052 { 1053 int rc; 1054 1055 
p_hwfn->stream->next_in = input_buf; 1056 p_hwfn->stream->avail_in = input_len; 1057 p_hwfn->stream->next_out = unzip_buf; 1058 p_hwfn->stream->avail_out = max_size; 1059 1060 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 1061 1062 if (rc != Z_OK) { 1063 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 1064 rc); 1065 return 0; 1066 } 1067 1068 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 1069 zlib_inflateEnd(p_hwfn->stream); 1070 1071 if (rc != Z_OK && rc != Z_STREAM_END) { 1072 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 1073 p_hwfn->stream->msg, rc); 1074 return 0; 1075 } 1076 1077 return p_hwfn->stream->total_out / 4; 1078 } 1079 1080 static int qed_alloc_stream_mem(struct qed_dev *cdev) 1081 { 1082 int i; 1083 void *workspace; 1084 1085 for_each_hwfn(cdev, i) { 1086 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1087 1088 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 1089 if (!p_hwfn->stream) 1090 return -ENOMEM; 1091 1092 workspace = vzalloc(zlib_inflate_workspacesize()); 1093 if (!workspace) 1094 return -ENOMEM; 1095 p_hwfn->stream->workspace = workspace; 1096 } 1097 1098 return 0; 1099 } 1100 1101 static void qed_free_stream_mem(struct qed_dev *cdev) 1102 { 1103 int i; 1104 1105 for_each_hwfn(cdev, i) { 1106 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1107 1108 if (!p_hwfn->stream) 1109 return; 1110 1111 vfree(p_hwfn->stream->workspace); 1112 kfree(p_hwfn->stream); 1113 } 1114 } 1115 1116 static void qed_update_pf_params(struct qed_dev *cdev, 1117 struct qed_pf_params *params) 1118 { 1119 int i; 1120 1121 if (IS_ENABLED(CONFIG_QED_RDMA)) { 1122 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 1123 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 1124 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 1125 /* divide by 3 the MRs to avoid MF ILT overflow */ 1126 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 1127 } 1128 1129 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 1130 params->eth_pf_params.num_arfs_filters = 0; 1131 1132 /* In case we might support RDMA, don't allow qede to be greedy 1133 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 1134 * per hwfn. 
1135 */ 1136 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 1137 u16 *num_cons; 1138 1139 num_cons = ¶ms->eth_pf_params.num_cons; 1140 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 1141 } 1142 1143 for (i = 0; i < cdev->num_hwfns; i++) { 1144 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1145 1146 p_hwfn->pf_params = *params; 1147 } 1148 } 1149 1150 #define QED_PERIODIC_DB_REC_COUNT 10 1151 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 1152 #define QED_PERIODIC_DB_REC_INTERVAL \ 1153 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 1154 1155 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, 1156 enum qed_slowpath_wq_flag wq_flag, 1157 unsigned long delay) 1158 { 1159 if (!hwfn->slowpath_wq_active) 1160 return -EINVAL; 1161 1162 /* Memory barrier for setting atomic bit */ 1163 smp_mb__before_atomic(); 1164 set_bit(wq_flag, &hwfn->slowpath_task_flags); 1165 smp_mb__after_atomic(); 1166 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); 1167 1168 return 0; 1169 } 1170 1171 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) 1172 { 1173 /* Reset periodic Doorbell Recovery counter */ 1174 p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; 1175 1176 /* Don't schedule periodic Doorbell Recovery if already scheduled */ 1177 if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1178 &p_hwfn->slowpath_task_flags)) 1179 return; 1180 1181 qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, 1182 QED_PERIODIC_DB_REC_INTERVAL); 1183 } 1184 1185 static void qed_slowpath_wq_stop(struct qed_dev *cdev) 1186 { 1187 int i; 1188 1189 if (IS_VF(cdev)) 1190 return; 1191 1192 for_each_hwfn(cdev, i) { 1193 if (!cdev->hwfns[i].slowpath_wq) 1194 continue; 1195 1196 /* Stop queuing new delayed works */ 1197 cdev->hwfns[i].slowpath_wq_active = false; 1198 1199 cancel_delayed_work(&cdev->hwfns[i].slowpath_task); 1200 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 1201 } 1202 } 1203 1204 static void qed_slowpath_task(struct work_struct *work) 1205 { 1206 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 1207 slowpath_task.work); 1208 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 1209 1210 if (!ptt) { 1211 if (hwfn->slowpath_wq_active) 1212 queue_delayed_work(hwfn->slowpath_wq, 1213 &hwfn->slowpath_task, 0); 1214 1215 return; 1216 } 1217 1218 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 1219 &hwfn->slowpath_task_flags)) 1220 qed_mfw_process_tlv_req(hwfn, ptt); 1221 1222 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1223 &hwfn->slowpath_task_flags)) { 1224 qed_db_rec_handler(hwfn, ptt); 1225 if (hwfn->periodic_db_rec_count--) 1226 qed_slowpath_delayed_work(hwfn, 1227 QED_SLOWPATH_PERIODIC_DB_REC, 1228 QED_PERIODIC_DB_REC_INTERVAL); 1229 } 1230 1231 qed_ptt_release(hwfn, ptt); 1232 } 1233 1234 static int qed_slowpath_wq_start(struct qed_dev *cdev) 1235 { 1236 struct qed_hwfn *hwfn; 1237 char name[NAME_SIZE]; 1238 int i; 1239 1240 if (IS_VF(cdev)) 1241 return 0; 1242 1243 for_each_hwfn(cdev, i) { 1244 hwfn = &cdev->hwfns[i]; 1245 1246 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", 1247 cdev->pdev->bus->number, 1248 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 1249 1250 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); 1251 if (!hwfn->slowpath_wq) { 1252 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1253 return -ENOMEM; 1254 } 1255 1256 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1257 hwfn->slowpath_wq_active = true; 1258 } 1259 1260 return 0; 1261 } 1262 1263 static int qed_slowpath_start(struct qed_dev *cdev, 1264 struct 
qed_slowpath_params *params) 1265 { 1266 struct qed_drv_load_params drv_load_params; 1267 struct qed_hw_init_params hw_init_params; 1268 struct qed_mcp_drv_version drv_version; 1269 struct qed_tunnel_info tunn_info; 1270 const u8 *data = NULL; 1271 struct qed_hwfn *hwfn; 1272 struct qed_ptt *p_ptt; 1273 int rc = -EINVAL; 1274 1275 if (qed_iov_wq_start(cdev)) 1276 goto err; 1277 1278 if (qed_slowpath_wq_start(cdev)) 1279 goto err; 1280 1281 if (IS_PF(cdev)) { 1282 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1283 &cdev->pdev->dev); 1284 if (rc) { 1285 DP_NOTICE(cdev, 1286 "Failed to find fw file - /lib/firmware/%s\n", 1287 QED_FW_FILE_NAME); 1288 goto err; 1289 } 1290 1291 if (cdev->num_hwfns == 1) { 1292 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1293 if (p_ptt) { 1294 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1295 } else { 1296 DP_NOTICE(cdev, 1297 "Failed to acquire PTT for aRFS\n"); 1298 goto err; 1299 } 1300 } 1301 } 1302 1303 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1304 rc = qed_nic_setup(cdev); 1305 if (rc) 1306 goto err; 1307 1308 if (IS_PF(cdev)) 1309 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1310 else 1311 rc = qed_slowpath_vf_setup_int(cdev); 1312 if (rc) 1313 goto err1; 1314 1315 if (IS_PF(cdev)) { 1316 /* Allocate stream for unzipping */ 1317 rc = qed_alloc_stream_mem(cdev); 1318 if (rc) 1319 goto err2; 1320 1321 /* First Dword used to differentiate between various sources */ 1322 data = cdev->firmware->data + sizeof(u32); 1323 1324 qed_dbg_pf_init(cdev); 1325 } 1326 1327 /* Start the slowpath */ 1328 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1329 memset(&tunn_info, 0, sizeof(tunn_info)); 1330 tunn_info.vxlan.b_mode_enabled = true; 1331 tunn_info.l2_gre.b_mode_enabled = true; 1332 tunn_info.ip_gre.b_mode_enabled = true; 1333 tunn_info.l2_geneve.b_mode_enabled = true; 1334 tunn_info.ip_geneve.b_mode_enabled = true; 1335 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1336 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1337 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1338 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1339 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1340 hw_init_params.p_tunn = &tunn_info; 1341 hw_init_params.b_hw_start = true; 1342 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1343 hw_init_params.allow_npar_tx_switch = true; 1344 hw_init_params.bin_fw_data = data; 1345 1346 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1347 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1348 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1349 drv_load_params.avoid_eng_reset = false; 1350 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1351 hw_init_params.p_drv_load_params = &drv_load_params; 1352 1353 rc = qed_hw_init(cdev, &hw_init_params); 1354 if (rc) 1355 goto err2; 1356 1357 DP_INFO(cdev, 1358 "HW initialization and function start completed successfully\n"); 1359 1360 if (IS_PF(cdev)) { 1361 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1362 BIT(QED_MODE_L2GENEVE_TUNN) | 1363 BIT(QED_MODE_IPGENEVE_TUNN) | 1364 BIT(QED_MODE_L2GRE_TUNN) | 1365 BIT(QED_MODE_IPGRE_TUNN)); 1366 } 1367 1368 /* Allocate LL2 interface if needed */ 1369 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1370 rc = qed_ll2_alloc_if(cdev); 1371 if (rc) 1372 goto err3; 1373 } 1374 if (IS_PF(cdev)) { 1375 hwfn = QED_LEADING_HWFN(cdev); 1376 drv_version.version = (params->drv_major << 24) | 1377 (params->drv_minor << 16) | 1378 (params->drv_rev << 8) | 1379 
(params->drv_eng); 1380 strlcpy(drv_version.name, params->name, 1381 MCP_DRV_VER_STR_SIZE - 4); 1382 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1383 &drv_version); 1384 if (rc) { 1385 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1386 goto err4; 1387 } 1388 } 1389 1390 qed_reset_vport_stats(cdev); 1391 1392 return 0; 1393 1394 err4: 1395 qed_ll2_dealloc_if(cdev); 1396 err3: 1397 qed_hw_stop(cdev); 1398 err2: 1399 qed_hw_timers_stop_all(cdev); 1400 if (IS_PF(cdev)) 1401 qed_slowpath_irq_free(cdev); 1402 qed_free_stream_mem(cdev); 1403 qed_disable_msix(cdev); 1404 err1: 1405 qed_resc_free(cdev); 1406 err: 1407 if (IS_PF(cdev)) 1408 release_firmware(cdev->firmware); 1409 1410 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1411 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1412 qed_ptt_release(QED_LEADING_HWFN(cdev), 1413 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1414 1415 qed_iov_wq_stop(cdev, false); 1416 1417 qed_slowpath_wq_stop(cdev); 1418 1419 return rc; 1420 } 1421 1422 static int qed_slowpath_stop(struct qed_dev *cdev) 1423 { 1424 if (!cdev) 1425 return -ENODEV; 1426 1427 qed_slowpath_wq_stop(cdev); 1428 1429 qed_ll2_dealloc_if(cdev); 1430 1431 if (IS_PF(cdev)) { 1432 if (cdev->num_hwfns == 1) 1433 qed_ptt_release(QED_LEADING_HWFN(cdev), 1434 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1435 qed_free_stream_mem(cdev); 1436 if (IS_QED_ETH_IF(cdev)) 1437 qed_sriov_disable(cdev, true); 1438 } 1439 1440 qed_nic_stop(cdev); 1441 1442 if (IS_PF(cdev)) 1443 qed_slowpath_irq_free(cdev); 1444 1445 qed_disable_msix(cdev); 1446 1447 qed_resc_free(cdev); 1448 1449 qed_iov_wq_stop(cdev, true); 1450 1451 if (IS_PF(cdev)) 1452 release_firmware(cdev->firmware); 1453 1454 return 0; 1455 } 1456 1457 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1458 { 1459 int i; 1460 1461 memcpy(cdev->name, name, NAME_SIZE); 1462 for_each_hwfn(cdev, i) 1463 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1464 } 1465 1466 static u32 qed_sb_init(struct qed_dev *cdev, 1467 struct qed_sb_info *sb_info, 1468 void *sb_virt_addr, 1469 dma_addr_t sb_phy_addr, u16 sb_id, 1470 enum qed_sb_type type) 1471 { 1472 struct qed_hwfn *p_hwfn; 1473 struct qed_ptt *p_ptt; 1474 u16 rel_sb_id; 1475 u32 rc; 1476 1477 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1478 if (type == QED_SB_TYPE_L2_QUEUE) { 1479 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1480 rel_sb_id = sb_id / cdev->num_hwfns; 1481 } else { 1482 p_hwfn = QED_AFFIN_HWFN(cdev); 1483 rel_sb_id = sb_id; 1484 } 1485 1486 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1487 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1488 IS_LEAD_HWFN(p_hwfn) ? 
0 : 1, rel_sb_id, sb_id); 1489 1490 if (IS_PF(p_hwfn->cdev)) { 1491 p_ptt = qed_ptt_acquire(p_hwfn); 1492 if (!p_ptt) 1493 return -EBUSY; 1494 1495 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1496 sb_phy_addr, rel_sb_id); 1497 qed_ptt_release(p_hwfn, p_ptt); 1498 } else { 1499 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1500 sb_phy_addr, rel_sb_id); 1501 } 1502 1503 return rc; 1504 } 1505 1506 static u32 qed_sb_release(struct qed_dev *cdev, 1507 struct qed_sb_info *sb_info, 1508 u16 sb_id, 1509 enum qed_sb_type type) 1510 { 1511 struct qed_hwfn *p_hwfn; 1512 u16 rel_sb_id; 1513 u32 rc; 1514 1515 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1516 if (type == QED_SB_TYPE_L2_QUEUE) { 1517 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1518 rel_sb_id = sb_id / cdev->num_hwfns; 1519 } else { 1520 p_hwfn = QED_AFFIN_HWFN(cdev); 1521 rel_sb_id = sb_id; 1522 } 1523 1524 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1525 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1526 IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); 1527 1528 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1529 1530 return rc; 1531 } 1532 1533 static bool qed_can_link_change(struct qed_dev *cdev) 1534 { 1535 return true; 1536 } 1537 1538 static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, 1539 const struct qed_link_params *params) 1540 { 1541 struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; 1542 const struct qed_mfw_speed_map *map; 1543 u32 i; 1544 1545 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1546 ext_speed->autoneg = !!params->autoneg; 1547 1548 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1549 ext_speed->advertised_speeds = 0; 1550 1551 for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { 1552 map = qed_mfw_ext_maps + i; 1553 1554 if (linkmode_intersects(params->adv_speeds, map->caps)) 1555 ext_speed->advertised_speeds |= map->mfw_val; 1556 } 1557 } 1558 1559 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { 1560 switch (params->forced_speed) { 1561 case SPEED_1000: 1562 ext_speed->forced_speed = QED_EXT_SPEED_1G; 1563 break; 1564 case SPEED_10000: 1565 ext_speed->forced_speed = QED_EXT_SPEED_10G; 1566 break; 1567 case SPEED_20000: 1568 ext_speed->forced_speed = QED_EXT_SPEED_20G; 1569 break; 1570 case SPEED_25000: 1571 ext_speed->forced_speed = QED_EXT_SPEED_25G; 1572 break; 1573 case SPEED_40000: 1574 ext_speed->forced_speed = QED_EXT_SPEED_40G; 1575 break; 1576 case SPEED_50000: 1577 ext_speed->forced_speed = QED_EXT_SPEED_50G_R | 1578 QED_EXT_SPEED_50G_R2; 1579 break; 1580 case SPEED_100000: 1581 ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | 1582 QED_EXT_SPEED_100G_R4 | 1583 QED_EXT_SPEED_100G_P4; 1584 break; 1585 default: 1586 break; 1587 } 1588 } 1589 1590 if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) 1591 return; 1592 1593 switch (params->forced_speed) { 1594 case SPEED_25000: 1595 switch (params->fec) { 1596 case FEC_FORCE_MODE_NONE: 1597 link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; 1598 break; 1599 case FEC_FORCE_MODE_FIRECODE: 1600 link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; 1601 break; 1602 case FEC_FORCE_MODE_RS: 1603 link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; 1604 break; 1605 case FEC_FORCE_MODE_AUTO: 1606 link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 | 1607 ETH_EXT_FEC_25G_BASE_R | 1608 ETH_EXT_FEC_25G_NONE; 1609 break; 1610 default: 1611 break; 1612 } 1613 1614 break; 1615 case SPEED_40000: 1616 switch 
(params->fec) { 1617 case FEC_FORCE_MODE_NONE: 1618 link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE; 1619 break; 1620 case FEC_FORCE_MODE_FIRECODE: 1621 link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R; 1622 break; 1623 case FEC_FORCE_MODE_AUTO: 1624 link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R | 1625 ETH_EXT_FEC_40G_NONE; 1626 break; 1627 default: 1628 break; 1629 } 1630 1631 break; 1632 case SPEED_50000: 1633 switch (params->fec) { 1634 case FEC_FORCE_MODE_NONE: 1635 link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE; 1636 break; 1637 case FEC_FORCE_MODE_FIRECODE: 1638 link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R; 1639 break; 1640 case FEC_FORCE_MODE_RS: 1641 link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528; 1642 break; 1643 case FEC_FORCE_MODE_AUTO: 1644 link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 | 1645 ETH_EXT_FEC_50G_BASE_R | 1646 ETH_EXT_FEC_50G_NONE; 1647 break; 1648 default: 1649 break; 1650 } 1651 1652 break; 1653 case SPEED_100000: 1654 switch (params->fec) { 1655 case FEC_FORCE_MODE_NONE: 1656 link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE; 1657 break; 1658 case FEC_FORCE_MODE_FIRECODE: 1659 link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R; 1660 break; 1661 case FEC_FORCE_MODE_RS: 1662 link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528; 1663 break; 1664 case FEC_FORCE_MODE_AUTO: 1665 link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 | 1666 ETH_EXT_FEC_100G_BASE_R | 1667 ETH_EXT_FEC_100G_NONE; 1668 break; 1669 default: 1670 break; 1671 } 1672 1673 break; 1674 default: 1675 break; 1676 } 1677 } 1678 1679 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) 1680 { 1681 struct qed_mcp_link_params *link_params; 1682 struct qed_mcp_link_speed_params *speed; 1683 const struct qed_mfw_speed_map *map; 1684 struct qed_hwfn *hwfn; 1685 struct qed_ptt *ptt; 1686 int rc; 1687 u32 i; 1688 1689 if (!cdev) 1690 return -ENODEV; 1691 1692 /* The link should be set only once per PF */ 1693 hwfn = &cdev->hwfns[0]; 1694 1695 /* When VF wants to set link, force it to read the bulletin instead. 1696 * This mimics the PF behavior, where a noitification [both immediate 1697 * and possible later] would be generated when changing properties. 
1698 */ 1699 if (IS_VF(cdev)) { 1700 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1701 return 0; 1702 } 1703 1704 ptt = qed_ptt_acquire(hwfn); 1705 if (!ptt) 1706 return -EBUSY; 1707 1708 link_params = qed_mcp_get_link_params(hwfn); 1709 if (!link_params) 1710 return -ENODATA; 1711 1712 speed = &link_params->speed; 1713 1714 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1715 speed->autoneg = !!params->autoneg; 1716 1717 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1718 speed->advertised_speeds = 0; 1719 1720 for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) { 1721 map = qed_mfw_legacy_maps + i; 1722 1723 if (linkmode_intersects(params->adv_speeds, map->caps)) 1724 speed->advertised_speeds |= map->mfw_val; 1725 } 1726 } 1727 1728 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1729 speed->forced_speed = params->forced_speed; 1730 1731 if (qed_mcp_is_ext_speed_supported(hwfn)) 1732 qed_set_ext_speed_params(link_params, params); 1733 1734 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1735 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1736 link_params->pause.autoneg = true; 1737 else 1738 link_params->pause.autoneg = false; 1739 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1740 link_params->pause.forced_rx = true; 1741 else 1742 link_params->pause.forced_rx = false; 1743 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1744 link_params->pause.forced_tx = true; 1745 else 1746 link_params->pause.forced_tx = false; 1747 } 1748 1749 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1750 switch (params->loopback_mode) { 1751 case QED_LINK_LOOPBACK_INT_PHY: 1752 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1753 break; 1754 case QED_LINK_LOOPBACK_EXT_PHY: 1755 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1756 break; 1757 case QED_LINK_LOOPBACK_EXT: 1758 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1759 break; 1760 case QED_LINK_LOOPBACK_MAC: 1761 link_params->loopback_mode = ETH_LOOPBACK_MAC; 1762 break; 1763 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123: 1764 link_params->loopback_mode = 1765 ETH_LOOPBACK_CNIG_AH_ONLY_0123; 1766 break; 1767 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301: 1768 link_params->loopback_mode = 1769 ETH_LOOPBACK_CNIG_AH_ONLY_2301; 1770 break; 1771 case QED_LINK_LOOPBACK_PCS_AH_ONLY: 1772 link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; 1773 break; 1774 case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY: 1775 link_params->loopback_mode = 1776 ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; 1777 break; 1778 case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY: 1779 link_params->loopback_mode = 1780 ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; 1781 break; 1782 default: 1783 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1784 break; 1785 } 1786 } 1787 1788 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1789 memcpy(&link_params->eee, ¶ms->eee, 1790 sizeof(link_params->eee)); 1791 1792 if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG) 1793 link_params->fec = params->fec; 1794 1795 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1796 1797 qed_ptt_release(hwfn, ptt); 1798 1799 return rc; 1800 } 1801 1802 static int qed_get_port_type(u32 media_type) 1803 { 1804 int port_type; 1805 1806 switch (media_type) { 1807 case MEDIA_SFPP_10G_FIBER: 1808 case MEDIA_SFP_1G_FIBER: 1809 case MEDIA_XFP_FIBER: 1810 case MEDIA_MODULE_FIBER: 1811 port_type = PORT_FIBRE; 1812 break; 1813 case MEDIA_DA_TWINAX: 1814 port_type = PORT_DA; 1815 break; 1816 case MEDIA_BASE_T: 1817 
port_type = PORT_TP; 1818 break; 1819 case MEDIA_KR: 1820 case MEDIA_NOT_PRESENT: 1821 port_type = PORT_NONE; 1822 break; 1823 case MEDIA_UNSPECIFIED: 1824 default: 1825 port_type = PORT_OTHER; 1826 break; 1827 } 1828 return port_type; 1829 } 1830 1831 static int qed_get_link_data(struct qed_hwfn *hwfn, 1832 struct qed_mcp_link_params *params, 1833 struct qed_mcp_link_state *link, 1834 struct qed_mcp_link_capabilities *link_caps) 1835 { 1836 void *p; 1837 1838 if (!IS_PF(hwfn->cdev)) { 1839 qed_vf_get_link_params(hwfn, params); 1840 qed_vf_get_link_state(hwfn, link); 1841 qed_vf_get_link_caps(hwfn, link_caps); 1842 1843 return 0; 1844 } 1845 1846 p = qed_mcp_get_link_params(hwfn); 1847 if (!p) 1848 return -ENXIO; 1849 memcpy(params, p, sizeof(*params)); 1850 1851 p = qed_mcp_get_link_state(hwfn); 1852 if (!p) 1853 return -ENXIO; 1854 memcpy(link, p, sizeof(*link)); 1855 1856 p = qed_mcp_get_link_capabilities(hwfn); 1857 if (!p) 1858 return -ENXIO; 1859 memcpy(link_caps, p, sizeof(*link_caps)); 1860 1861 return 0; 1862 } 1863 1864 static void qed_fill_link_capability(struct qed_hwfn *hwfn, 1865 struct qed_ptt *ptt, u32 capability, 1866 unsigned long *if_caps) 1867 { 1868 u32 media_type, tcvr_state, tcvr_type; 1869 u32 speed_mask, board_cfg; 1870 1871 if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) 1872 media_type = MEDIA_UNSPECIFIED; 1873 1874 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) 1875 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; 1876 1877 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) 1878 speed_mask = 0xFFFFFFFF; 1879 1880 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) 1881 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 1882 1883 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 1884 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", 1885 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); 1886 1887 switch (media_type) { 1888 case MEDIA_DA_TWINAX: 1889 phylink_set(if_caps, FIBRE); 1890 1891 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1892 phylink_set(if_caps, 20000baseKR2_Full); 1893 1894 /* For DAC media multiple speed capabilities are supported */ 1895 capability |= speed_mask; 1896 1897 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1898 phylink_set(if_caps, 1000baseKX_Full); 1899 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1900 phylink_set(if_caps, 10000baseCR_Full); 1901 1902 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1903 switch (tcvr_type) { 1904 case ETH_TRANSCEIVER_TYPE_40G_CR4: 1905 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: 1906 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 1907 phylink_set(if_caps, 40000baseCR4_Full); 1908 break; 1909 default: 1910 break; 1911 } 1912 1913 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1914 phylink_set(if_caps, 25000baseCR_Full); 1915 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1916 phylink_set(if_caps, 50000baseCR2_Full); 1917 1918 if (capability & 1919 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1920 switch (tcvr_type) { 1921 case ETH_TRANSCEIVER_TYPE_100G_CR4: 1922 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 1923 phylink_set(if_caps, 100000baseCR4_Full); 1924 break; 1925 default: 1926 break; 1927 } 1928 1929 break; 1930 case MEDIA_BASE_T: 1931 phylink_set(if_caps, TP); 1932 1933 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { 1934 if (capability & 1935 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1936 phylink_set(if_caps, 
1000baseT_Full); 1937 if (capability & 1938 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1939 phylink_set(if_caps, 10000baseT_Full); 1940 } 1941 1942 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { 1943 phylink_set(if_caps, FIBRE); 1944 1945 switch (tcvr_type) { 1946 case ETH_TRANSCEIVER_TYPE_1000BASET: 1947 phylink_set(if_caps, 1000baseT_Full); 1948 break; 1949 case ETH_TRANSCEIVER_TYPE_10G_BASET: 1950 phylink_set(if_caps, 10000baseT_Full); 1951 break; 1952 default: 1953 break; 1954 } 1955 } 1956 1957 break; 1958 case MEDIA_SFP_1G_FIBER: 1959 case MEDIA_SFPP_10G_FIBER: 1960 case MEDIA_XFP_FIBER: 1961 case MEDIA_MODULE_FIBER: 1962 phylink_set(if_caps, FIBRE); 1963 capability |= speed_mask; 1964 1965 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1966 switch (tcvr_type) { 1967 case ETH_TRANSCEIVER_TYPE_1G_LX: 1968 case ETH_TRANSCEIVER_TYPE_1G_SX: 1969 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 1970 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 1971 phylink_set(if_caps, 1000baseKX_Full); 1972 break; 1973 default: 1974 break; 1975 } 1976 1977 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1978 switch (tcvr_type) { 1979 case ETH_TRANSCEIVER_TYPE_10G_SR: 1980 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 1981 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 1982 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 1983 phylink_set(if_caps, 10000baseSR_Full); 1984 break; 1985 case ETH_TRANSCEIVER_TYPE_10G_LR: 1986 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 1987 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: 1988 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 1989 phylink_set(if_caps, 10000baseLR_Full); 1990 break; 1991 case ETH_TRANSCEIVER_TYPE_10G_LRM: 1992 phylink_set(if_caps, 10000baseLRM_Full); 1993 break; 1994 case ETH_TRANSCEIVER_TYPE_10G_ER: 1995 phylink_set(if_caps, 10000baseR_FEC); 1996 break; 1997 default: 1998 break; 1999 } 2000 2001 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 2002 phylink_set(if_caps, 20000baseKR2_Full); 2003 2004 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 2005 switch (tcvr_type) { 2006 case ETH_TRANSCEIVER_TYPE_25G_SR: 2007 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 2008 phylink_set(if_caps, 25000baseSR_Full); 2009 break; 2010 default: 2011 break; 2012 } 2013 2014 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 2015 switch (tcvr_type) { 2016 case ETH_TRANSCEIVER_TYPE_40G_LR4: 2017 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 2018 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2019 phylink_set(if_caps, 40000baseLR4_Full); 2020 break; 2021 case ETH_TRANSCEIVER_TYPE_40G_SR4: 2022 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2023 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 2024 phylink_set(if_caps, 40000baseSR4_Full); 2025 break; 2026 default: 2027 break; 2028 } 2029 2030 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 2031 phylink_set(if_caps, 50000baseKR2_Full); 2032 2033 if (capability & 2034 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 2035 switch (tcvr_type) { 2036 case ETH_TRANSCEIVER_TYPE_100G_SR4: 2037 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2038 phylink_set(if_caps, 100000baseSR4_Full); 2039 break; 2040 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2041 phylink_set(if_caps, 100000baseLR4_ER4_Full); 2042 break; 2043 default: 2044 break; 2045 } 2046 2047 break; 2048 case MEDIA_KR: 2049 phylink_set(if_caps, Backplane); 2050 2051 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 2052 phylink_set(if_caps, 
20000baseKR2_Full); 2053 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 2054 phylink_set(if_caps, 1000baseKX_Full); 2055 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 2056 phylink_set(if_caps, 10000baseKR_Full); 2057 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 2058 phylink_set(if_caps, 25000baseKR_Full); 2059 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 2060 phylink_set(if_caps, 40000baseKR4_Full); 2061 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 2062 phylink_set(if_caps, 50000baseKR2_Full); 2063 if (capability & 2064 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 2065 phylink_set(if_caps, 100000baseKR4_Full); 2066 2067 break; 2068 case MEDIA_UNSPECIFIED: 2069 case MEDIA_NOT_PRESENT: 2070 default: 2071 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, 2072 "Unknown media and transceiver type;\n"); 2073 break; 2074 } 2075 } 2076 2077 static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask) 2078 { 2079 *speed_mask = 0; 2080 2081 if (caps & 2082 (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD)) 2083 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2084 if (caps & QED_LINK_PARTNER_SPEED_10G) 2085 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2086 if (caps & QED_LINK_PARTNER_SPEED_20G) 2087 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 2088 if (caps & QED_LINK_PARTNER_SPEED_25G) 2089 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 2090 if (caps & QED_LINK_PARTNER_SPEED_40G) 2091 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 2092 if (caps & QED_LINK_PARTNER_SPEED_50G) 2093 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 2094 if (caps & QED_LINK_PARTNER_SPEED_100G) 2095 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 2096 } 2097 2098 static void qed_fill_link(struct qed_hwfn *hwfn, 2099 struct qed_ptt *ptt, 2100 struct qed_link_output *if_link) 2101 { 2102 struct qed_mcp_link_capabilities link_caps; 2103 struct qed_mcp_link_params params; 2104 struct qed_mcp_link_state link; 2105 u32 media_type, speed_mask; 2106 2107 memset(if_link, 0, sizeof(*if_link)); 2108 2109 /* Prepare source inputs */ 2110 if (qed_get_link_data(hwfn, &params, &link, &link_caps)) { 2111 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 2112 return; 2113 } 2114 2115 /* Set the link parameters to pass to protocol driver */ 2116 if (link.link_up) 2117 if_link->link_up = true; 2118 2119 if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { 2120 if (link_caps.default_ext_autoneg) 2121 phylink_set(if_link->supported_caps, Autoneg); 2122 2123 linkmode_copy(if_link->advertised_caps, if_link->supported_caps); 2124 2125 if (params.ext_speed.autoneg) 2126 phylink_set(if_link->advertised_caps, Autoneg); 2127 else 2128 phylink_clear(if_link->advertised_caps, Autoneg); 2129 2130 qed_fill_link_capability(hwfn, ptt, 2131 params.ext_speed.advertised_speeds, 2132 if_link->advertised_caps); 2133 } else { 2134 if (link_caps.default_speed_autoneg) 2135 phylink_set(if_link->supported_caps, Autoneg); 2136 2137 linkmode_copy(if_link->advertised_caps, if_link->supported_caps); 2138 2139 if (params.speed.autoneg) 2140 phylink_set(if_link->advertised_caps, Autoneg); 2141 else 2142 phylink_clear(if_link->advertised_caps, Autoneg); 2143 } 2144 2145 if (params.pause.autoneg || 2146 (params.pause.forced_rx && params.pause.forced_tx)) 2147 phylink_set(if_link->supported_caps, Asym_Pause); 2148 if (params.pause.autoneg || params.pause.forced_rx || 2149
params.pause.forced_tx) 2150 phylink_set(if_link->supported_caps, Pause); 2151 2152 if_link->sup_fec = link_caps.fec_default; 2153 if_link->active_fec = params.fec; 2154 2155 /* Fill link advertised capability */ 2156 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 2157 if_link->advertised_caps); 2158 2159 /* Fill link supported capability */ 2160 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 2161 if_link->supported_caps); 2162 2163 /* Fill partner advertised capability */ 2164 qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); 2165 qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); 2166 2167 if (link.link_up) 2168 if_link->speed = link.speed; 2169 2170 /* TODO - fill duplex properly */ 2171 if_link->duplex = DUPLEX_FULL; 2172 qed_mcp_get_media_type(hwfn, ptt, &media_type); 2173 if_link->port = qed_get_port_type(media_type); 2174 2175 if_link->autoneg = params.speed.autoneg; 2176 2177 if (params.pause.autoneg) 2178 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 2179 if (params.pause.forced_rx) 2180 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2181 if (params.pause.forced_tx) 2182 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2183 2184 if (link.an_complete) 2185 phylink_set(if_link->lp_caps, Autoneg); 2186 if (link.partner_adv_pause) 2187 phylink_set(if_link->lp_caps, Pause); 2188 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 2189 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 2190 phylink_set(if_link->lp_caps, Asym_Pause); 2191 2192 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 2193 if_link->eee_supported = false; 2194 } else { 2195 if_link->eee_supported = true; 2196 if_link->eee_active = link.eee_active; 2197 if_link->sup_caps = link_caps.eee_speed_caps; 2198 /* MFW clears adv_caps on eee disable; use configured value */ 2199 if_link->eee.adv_caps = link.eee_adv_caps ? 
link.eee_adv_caps : 2200 params.eee.adv_caps; 2201 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 2202 if_link->eee.enable = params.eee.enable; 2203 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 2204 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 2205 } 2206 } 2207 2208 static void qed_get_current_link(struct qed_dev *cdev, 2209 struct qed_link_output *if_link) 2210 { 2211 struct qed_hwfn *hwfn; 2212 struct qed_ptt *ptt; 2213 int i; 2214 2215 hwfn = &cdev->hwfns[0]; 2216 if (IS_PF(cdev)) { 2217 ptt = qed_ptt_acquire(hwfn); 2218 if (ptt) { 2219 qed_fill_link(hwfn, ptt, if_link); 2220 qed_ptt_release(hwfn, ptt); 2221 } else { 2222 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 2223 } 2224 } else { 2225 qed_fill_link(hwfn, NULL, if_link); 2226 } 2227 2228 for_each_hwfn(cdev, i) 2229 qed_inform_vf_link_state(&cdev->hwfns[i]); 2230 } 2231 2232 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2233 { 2234 void *cookie = hwfn->cdev->ops_cookie; 2235 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2236 struct qed_link_output if_link; 2237 2238 qed_fill_link(hwfn, ptt, &if_link); 2239 qed_inform_vf_link_state(hwfn); 2240 2241 if (IS_LEAD_HWFN(hwfn) && cookie) 2242 op->link_update(cookie, &if_link); 2243 } 2244 2245 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2246 { 2247 void *cookie = hwfn->cdev->ops_cookie; 2248 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2249 2250 if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) 2251 op->bw_update(cookie); 2252 } 2253 2254 static int qed_drain(struct qed_dev *cdev) 2255 { 2256 struct qed_hwfn *hwfn; 2257 struct qed_ptt *ptt; 2258 int i, rc; 2259 2260 if (IS_VF(cdev)) 2261 return 0; 2262 2263 for_each_hwfn(cdev, i) { 2264 hwfn = &cdev->hwfns[i]; 2265 ptt = qed_ptt_acquire(hwfn); 2266 if (!ptt) { 2267 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 2268 return -EBUSY; 2269 } 2270 rc = qed_mcp_drain(hwfn, ptt); 2271 qed_ptt_release(hwfn, ptt); 2272 if (rc) 2273 return rc; 2274 } 2275 2276 return 0; 2277 } 2278 2279 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 2280 struct qed_nvm_image_att *nvm_image, 2281 u32 *crc) 2282 { 2283 u8 *buf = NULL; 2284 int rc; 2285 2286 /* Allocate a buffer for holding the nvram image */ 2287 buf = kzalloc(nvm_image->length, GFP_KERNEL); 2288 if (!buf) 2289 return -ENOMEM; 2290 2291 /* Read image into buffer */ 2292 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 2293 buf, nvm_image->length); 2294 if (rc) { 2295 DP_ERR(cdev, "Failed reading image from nvm\n"); 2296 goto out; 2297 } 2298 2299 /* Convert the buffer into big-endian format (excluding the 2300 * closing 4 bytes of CRC). 2301 */ 2302 cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, 2303 DIV_ROUND_UP(nvm_image->length - 4, 4)); 2304 2305 /* Calc CRC for the "actual" image buffer, i.e. not including 2306 * the last 4 CRC bytes. 2307 */ 2308 *crc = ~crc32(~0U, buf, nvm_image->length - 4); 2309 *crc = (__force u32)cpu_to_be32p(crc); 2310 2311 out: 2312 kfree(buf); 2313 2314 return rc; 2315 } 2316 2317 /* Binary file format - 2318 * /----------------------------------------------------------------------\ 2319 * 0B | 0x4 [command index] | 2320 * 4B | image_type | Options | Number of register settings | 2321 * 8B | Value | 2322 * 12B | Mask | 2323 * 16B | Offset | 2324 * \----------------------------------------------------------------------/ 2325 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 
2326 * Options - 0'b - Calculate & Update CRC for image 2327 */ 2328 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 2329 bool *check_resp) 2330 { 2331 struct qed_nvm_image_att nvm_image; 2332 struct qed_hwfn *p_hwfn; 2333 bool is_crc = false; 2334 u32 image_type; 2335 int rc = 0, i; 2336 u16 len; 2337 2338 *data += 4; 2339 image_type = **data; 2340 p_hwfn = QED_LEADING_HWFN(cdev); 2341 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2342 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 2343 break; 2344 if (i == p_hwfn->nvm_info.num_images) { 2345 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 2346 image_type); 2347 return -ENOENT; 2348 } 2349 2350 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 2351 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 2352 2353 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2354 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 2355 **data, image_type, nvm_image.start_addr, 2356 nvm_image.start_addr + nvm_image.length - 1); 2357 (*data)++; 2358 is_crc = !!(**data & BIT(0)); 2359 (*data)++; 2360 len = *((u16 *)*data); 2361 *data += 2; 2362 if (is_crc) { 2363 u32 crc = 0; 2364 2365 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 2366 if (rc) { 2367 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 2368 goto exit; 2369 } 2370 2371 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2372 (nvm_image.start_addr + 2373 nvm_image.length - 4), (u8 *)&crc, 4); 2374 if (rc) 2375 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 2376 nvm_image.start_addr + nvm_image.length - 4, rc); 2377 goto exit; 2378 } 2379 2380 /* Iterate over the values for setting */ 2381 while (len) { 2382 u32 offset, mask, value, cur_value; 2383 u8 buf[4]; 2384 2385 value = *((u32 *)*data); 2386 *data += 4; 2387 mask = *((u32 *)*data); 2388 *data += 4; 2389 offset = *((u32 *)*data); 2390 *data += 4; 2391 2392 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 2393 4); 2394 if (rc) { 2395 DP_ERR(cdev, "Failed reading from %08x\n", 2396 nvm_image.start_addr + offset); 2397 goto exit; 2398 } 2399 2400 cur_value = le32_to_cpu(*((__le32 *)buf)); 2401 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2402 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 2403 nvm_image.start_addr + offset, cur_value, 2404 (cur_value & ~mask) | (value & mask), value, mask); 2405 value = (value & mask) | (cur_value & ~mask); 2406 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2407 nvm_image.start_addr + offset, 2408 (u8 *)&value, 4); 2409 if (rc) { 2410 DP_ERR(cdev, "Failed writing to %08x\n", 2411 nvm_image.start_addr + offset); 2412 goto exit; 2413 } 2414 2415 len--; 2416 } 2417 exit: 2418 return rc; 2419 } 2420 2421 /* Binary file format - 2422 * /----------------------------------------------------------------------\ 2423 * 0B | 0x3 [command index] | 2424 * 4B | b'0: check_response? 
| b'1-31 reserved | 2425 * 8B | File-type | reserved | 2426 * 12B | Image length in bytes | 2427 * \----------------------------------------------------------------------/ 2428 * Start a new file of the provided type 2429 */ 2430 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 2431 const u8 **data, bool *check_resp) 2432 { 2433 u32 file_type, file_size = 0; 2434 int rc; 2435 2436 *data += 4; 2437 *check_resp = !!(**data & BIT(0)); 2438 *data += 4; 2439 file_type = **data; 2440 2441 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2442 "About to start a new file of type %02x\n", file_type); 2443 if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { 2444 *data += 4; 2445 file_size = *((u32 *)(*data)); 2446 } 2447 2448 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, 2449 (u8 *)(&file_size), 4); 2450 *data += 4; 2451 2452 return rc; 2453 } 2454 2455 /* Binary file format - 2456 * /----------------------------------------------------------------------\ 2457 * 0B | 0x2 [command index] | 2458 * 4B | Length in bytes | 2459 * 8B | b'0: check_response? | b'1-31 reserved | 2460 * 12B | Offset in bytes | 2461 * 16B | Data ... | 2462 * \----------------------------------------------------------------------/ 2463 * Write data as part of a file that was previously started. Data should be 2464 * of length equal to that provided in the message 2465 */ 2466 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 2467 const u8 **data, bool *check_resp) 2468 { 2469 u32 offset, len; 2470 int rc; 2471 2472 *data += 4; 2473 len = *((u32 *)(*data)); 2474 *data += 4; 2475 *check_resp = !!(**data & BIT(0)); 2476 *data += 4; 2477 offset = *((u32 *)(*data)); 2478 *data += 4; 2479 2480 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2481 "About to write File-data: %08x bytes to offset %08x\n", 2482 len, offset); 2483 2484 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 2485 (char *)(*data), len); 2486 *data += len; 2487 2488 return rc; 2489 } 2490 2491 /* Binary file format [General header] - 2492 * /----------------------------------------------------------------------\ 2493 * 0B | QED_NVM_SIGNATURE | 2494 * 4B | Length in bytes | 2495 * 8B | Highest command in this batchfile | Reserved | 2496 * \----------------------------------------------------------------------/ 2497 */ 2498 static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 2499 const struct firmware *image, 2500 const u8 **data) 2501 { 2502 u32 signature, len; 2503 2504 /* Check minimum size */ 2505 if (image->size < 12) { 2506 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 2507 return -EINVAL; 2508 } 2509 2510 /* Check signature */ 2511 signature = *((u32 *)(*data)); 2512 if (signature != QED_NVM_SIGNATURE) { 2513 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 2514 return -EINVAL; 2515 } 2516 2517 *data += 4; 2518 /* Validate internal size equals the image-size */ 2519 len = *((u32 *)(*data)); 2520 if (len != image->size) { 2521 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 2522 len, (u32)image->size); 2523 return -EINVAL; 2524 } 2525 2526 *data += 4; 2527 /* Make sure driver familiar with all commands necessary for this */ 2528 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 2529 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", 2530 *((u16 *)(*data))); 2531 return -EINVAL; 2532 } 2533 2534 *data += 4; 2535 2536 return 0; 2537 } 2538 2539 /* Binary file format - 2540 * /----------------------------------------------------------------------\ 2541 * 0B | 0x5 [command index] | 
2542 * 4B | Number of config attributes | Reserved | 2543 * 4B | Config ID | Entity ID | Length | 2544 * 4B | Value | 2545 * | | 2546 * \----------------------------------------------------------------------/ 2547 * There can be several cfg_id-entity_id-Length-Value sets as specified by 2548 * 'Number of config attributes'. 2549 * 2550 * The API parses config attributes from the user provided buffer and flashes 2551 * them to the respective NVM path using the Management FW interface. 2552 */ 2553 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) 2554 { 2555 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2556 u8 entity_id, len, buf[32]; 2557 bool need_nvm_init = true; 2558 struct qed_ptt *ptt; 2559 u16 cfg_id, count; 2560 int rc = 0, i; 2561 u32 flags; 2562 2563 ptt = qed_ptt_acquire(hwfn); 2564 if (!ptt) 2565 return -EAGAIN; 2566 2567 /* NVM CFG ID attribute header */ 2568 *data += 4; 2569 count = *((u16 *)*data); 2570 *data += 4; 2571 2572 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2573 "Read config ids: num_attrs = %0d\n", count); 2574 /* NVM CFG ID attributes. Start loop index from 1 to avoid additional 2575 * arithmetic operations in the implementation. 2576 */ 2577 for (i = 1; i <= count; i++) { 2578 cfg_id = *((u16 *)*data); 2579 *data += 2; 2580 entity_id = **data; 2581 (*data)++; 2582 len = **data; 2583 (*data)++; 2584 memcpy(buf, *data, len); 2585 *data += len; 2586 2587 flags = 0; 2588 if (need_nvm_init) { 2589 flags |= QED_NVM_CFG_OPTION_INIT; 2590 need_nvm_init = false; 2591 } 2592 2593 /* Commit to flash and free the resources */ 2594 if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { 2595 flags |= QED_NVM_CFG_OPTION_COMMIT | 2596 QED_NVM_CFG_OPTION_FREE; 2597 need_nvm_init = true; 2598 } 2599 2600 if (entity_id) 2601 flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; 2602 2603 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2604 "cfg_id = %d entity = %d len = %d\n", cfg_id, 2605 entity_id, len); 2606 rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, 2607 buf, len); 2608 if (rc) { 2609 DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); 2610 break; 2611 } 2612 } 2613 2614 qed_ptt_release(hwfn, ptt); 2615 2616 return rc; 2617 } 2618 2619 #define QED_MAX_NVM_BUF_LEN 32 2620 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) 2621 { 2622 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2623 u8 buf[QED_MAX_NVM_BUF_LEN]; 2624 struct qed_ptt *ptt; 2625 u32 len; 2626 int rc; 2627 2628 ptt = qed_ptt_acquire(hwfn); 2629 if (!ptt) 2630 return QED_MAX_NVM_BUF_LEN; 2631 2632 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, 2633 &len); 2634 if (rc || !len) { 2635 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2636 len = QED_MAX_NVM_BUF_LEN; 2637 } 2638 2639 qed_ptt_release(hwfn, ptt); 2640 2641 return len; 2642 } 2643 2644 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, 2645 u32 cmd, u32 entity_id) 2646 { 2647 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2648 struct qed_ptt *ptt; 2649 u32 flags, len; 2650 int rc = 0; 2651 2652 ptt = qed_ptt_acquire(hwfn); 2653 if (!ptt) 2654 return -EAGAIN; 2655 2656 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2657 "Read config cmd = %d entity id %d\n", cmd, entity_id); 2658 flags = entity_id ?
QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; 2659 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); 2660 if (rc) 2661 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2662 2663 qed_ptt_release(hwfn, ptt); 2664 2665 return rc; 2666 } 2667 2668 static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 2669 { 2670 const struct firmware *image; 2671 const u8 *data, *data_end; 2672 u32 cmd_type; 2673 int rc; 2674 2675 rc = request_firmware(&image, name, &cdev->pdev->dev); 2676 if (rc) { 2677 DP_ERR(cdev, "Failed to find '%s'\n", name); 2678 return rc; 2679 } 2680 2681 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2682 "Flashing '%s' - firmware's data at %p, size is %08x\n", 2683 name, image->data, (u32)image->size); 2684 data = image->data; 2685 data_end = data + image->size; 2686 2687 rc = qed_nvm_flash_image_validate(cdev, image, &data); 2688 if (rc) 2689 goto exit; 2690 2691 while (data < data_end) { 2692 bool check_resp = false; 2693 2694 /* Parse the actual command */ 2695 cmd_type = *((u32 *)data); 2696 switch (cmd_type) { 2697 case QED_NVM_FLASH_CMD_FILE_DATA: 2698 rc = qed_nvm_flash_image_file_data(cdev, &data, 2699 &check_resp); 2700 break; 2701 case QED_NVM_FLASH_CMD_FILE_START: 2702 rc = qed_nvm_flash_image_file_start(cdev, &data, 2703 &check_resp); 2704 break; 2705 case QED_NVM_FLASH_CMD_NVM_CHANGE: 2706 rc = qed_nvm_flash_image_access(cdev, &data, 2707 &check_resp); 2708 break; 2709 case QED_NVM_FLASH_CMD_NVM_CFG_ID: 2710 rc = qed_nvm_flash_cfg_write(cdev, &data); 2711 break; 2712 default: 2713 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 2714 rc = -EINVAL; 2715 goto exit; 2716 } 2717 2718 if (rc) { 2719 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 2720 goto exit; 2721 } 2722 2723 /* Check response if needed */ 2724 if (check_resp) { 2725 u32 mcp_response = 0; 2726 2727 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 2728 DP_ERR(cdev, "Failed getting MCP response\n"); 2729 rc = -EINVAL; 2730 goto exit; 2731 } 2732 2733 switch (mcp_response & FW_MSG_CODE_MASK) { 2734 case FW_MSG_CODE_OK: 2735 case FW_MSG_CODE_NVM_OK: 2736 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 2737 case FW_MSG_CODE_PHY_OK: 2738 break; 2739 default: 2740 DP_ERR(cdev, "MFW returns error: %08x\n", 2741 mcp_response); 2742 rc = -EINVAL; 2743 goto exit; 2744 } 2745 } 2746 } 2747 2748 exit: 2749 release_firmware(image); 2750 2751 return rc; 2752 } 2753 2754 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 2755 u8 *buf, u16 len) 2756 { 2757 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2758 2759 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 2760 } 2761 2762 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) 2763 { 2764 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2765 void *cookie = p_hwfn->cdev->ops_cookie; 2766 2767 if (ops && ops->schedule_recovery_handler) 2768 ops->schedule_recovery_handler(cookie); 2769 } 2770 2771 static const char * const qed_hw_err_type_descr[] = { 2772 [QED_HW_ERR_FAN_FAIL] = "Fan Failure", 2773 [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", 2774 [QED_HW_ERR_HW_ATTN] = "HW Attention", 2775 [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", 2776 [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", 2777 [QED_HW_ERR_FW_ASSERT] = "FW Assertion", 2778 [QED_HW_ERR_LAST] = "Unknown", 2779 }; 2780 2781 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, 2782 enum qed_hw_err_type err_type) 2783 { 2784 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2785 void *cookie = p_hwfn->cdev->ops_cookie; 2786 
const char *err_str; 2787 2788 if (err_type > QED_HW_ERR_LAST) 2789 err_type = QED_HW_ERR_LAST; 2790 err_str = qed_hw_err_type_descr[err_type]; 2791 2792 DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); 2793 2794 /* Call the HW error handler of the protocol driver. 2795 * If it is not available - perform a minimal handling of preventing 2796 * HW attentions from being reasserted. 2797 */ 2798 if (ops && ops->schedule_hw_err_handler) 2799 ops->schedule_hw_err_handler(cookie, err_type); 2800 else 2801 qed_int_attn_clr_enable(p_hwfn->cdev, true); 2802 } 2803 2804 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2805 void *handle) 2806 { 2807 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2808 } 2809 2810 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2811 { 2812 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2813 struct qed_ptt *ptt; 2814 int status = 0; 2815 2816 ptt = qed_ptt_acquire(hwfn); 2817 if (!ptt) 2818 return -EAGAIN; 2819 2820 status = qed_mcp_set_led(hwfn, ptt, mode); 2821 2822 qed_ptt_release(hwfn, ptt); 2823 2824 return status; 2825 } 2826 2827 int qed_recovery_process(struct qed_dev *cdev) 2828 { 2829 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2830 struct qed_ptt *p_ptt; 2831 int rc = 0; 2832 2833 p_ptt = qed_ptt_acquire(p_hwfn); 2834 if (!p_ptt) 2835 return -EAGAIN; 2836 2837 rc = qed_start_recovery_process(p_hwfn, p_ptt); 2838 2839 qed_ptt_release(p_hwfn, p_ptt); 2840 2841 return rc; 2842 } 2843 2844 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2845 { 2846 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2847 struct qed_ptt *ptt; 2848 int rc = 0; 2849 2850 if (IS_VF(cdev)) 2851 return 0; 2852 2853 ptt = qed_ptt_acquire(hwfn); 2854 if (!ptt) 2855 return -EAGAIN; 2856 2857 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 2858 : QED_OV_WOL_DISABLED); 2859 if (rc) 2860 goto out; 2861 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2862 2863 out: 2864 qed_ptt_release(hwfn, ptt); 2865 return rc; 2866 } 2867 2868 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2869 { 2870 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2871 struct qed_ptt *ptt; 2872 int status = 0; 2873 2874 if (IS_VF(cdev)) 2875 return 0; 2876 2877 ptt = qed_ptt_acquire(hwfn); 2878 if (!ptt) 2879 return -EAGAIN; 2880 2881 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
2882 QED_OV_DRIVER_STATE_ACTIVE : 2883 QED_OV_DRIVER_STATE_DISABLED); 2884 2885 qed_ptt_release(hwfn, ptt); 2886 2887 return status; 2888 } 2889 2890 static int qed_update_mac(struct qed_dev *cdev, u8 *mac) 2891 { 2892 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2893 struct qed_ptt *ptt; 2894 int status = 0; 2895 2896 if (IS_VF(cdev)) 2897 return 0; 2898 2899 ptt = qed_ptt_acquire(hwfn); 2900 if (!ptt) 2901 return -EAGAIN; 2902 2903 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 2904 if (status) 2905 goto out; 2906 2907 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2908 2909 out: 2910 qed_ptt_release(hwfn, ptt); 2911 return status; 2912 } 2913 2914 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 2915 { 2916 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2917 struct qed_ptt *ptt; 2918 int status = 0; 2919 2920 if (IS_VF(cdev)) 2921 return 0; 2922 2923 ptt = qed_ptt_acquire(hwfn); 2924 if (!ptt) 2925 return -EAGAIN; 2926 2927 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 2928 if (status) 2929 goto out; 2930 2931 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2932 2933 out: 2934 qed_ptt_release(hwfn, ptt); 2935 return status; 2936 } 2937 2938 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, 2939 u8 dev_addr, u32 offset, u32 len) 2940 { 2941 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2942 struct qed_ptt *ptt; 2943 int rc = 0; 2944 2945 if (IS_VF(cdev)) 2946 return 0; 2947 2948 ptt = qed_ptt_acquire(hwfn); 2949 if (!ptt) 2950 return -EAGAIN; 2951 2952 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, 2953 offset, len, buf); 2954 2955 qed_ptt_release(hwfn, ptt); 2956 2957 return rc; 2958 } 2959 2960 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) 2961 { 2962 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2963 struct qed_ptt *ptt; 2964 int rc = 0; 2965 2966 if (IS_VF(cdev)) 2967 return 0; 2968 2969 ptt = qed_ptt_acquire(hwfn); 2970 if (!ptt) 2971 return -EAGAIN; 2972 2973 rc = qed_dbg_grc_config(hwfn, cfg_id, val); 2974 2975 qed_ptt_release(hwfn, ptt); 2976 2977 return rc; 2978 } 2979 2980 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) 2981 { 2982 return QED_AFFIN_HWFN_IDX(cdev); 2983 } 2984 2985 static struct qed_selftest_ops qed_selftest_ops_pass = { 2986 .selftest_memory = &qed_selftest_memory, 2987 .selftest_interrupt = &qed_selftest_interrupt, 2988 .selftest_register = &qed_selftest_register, 2989 .selftest_clock = &qed_selftest_clock, 2990 .selftest_nvram = &qed_selftest_nvram, 2991 }; 2992 2993 const struct qed_common_ops qed_common_ops_pass = { 2994 .selftest = &qed_selftest_ops_pass, 2995 .probe = &qed_probe, 2996 .remove = &qed_remove, 2997 .set_power_state = &qed_set_power_state, 2998 .set_name = &qed_set_name, 2999 .update_pf_params = &qed_update_pf_params, 3000 .slowpath_start = &qed_slowpath_start, 3001 .slowpath_stop = &qed_slowpath_stop, 3002 .set_fp_int = &qed_set_int_fp, 3003 .get_fp_int = &qed_get_int_fp, 3004 .sb_init = &qed_sb_init, 3005 .sb_release = &qed_sb_release, 3006 .simd_handler_config = &qed_simd_handler_config, 3007 .simd_handler_clean = &qed_simd_handler_clean, 3008 .dbg_grc = &qed_dbg_grc, 3009 .dbg_grc_size = &qed_dbg_grc_size, 3010 .can_link_change = &qed_can_link_change, 3011 .set_link = &qed_set_link, 3012 .get_link = &qed_get_current_link, 3013 .drain = &qed_drain, 3014 .update_msglvl = &qed_init_dp, 3015 .devlink_register = qed_devlink_register, 3016 .devlink_unregister = qed_devlink_unregister, 3017 .report_fatal_error 
= qed_report_fatal_error, 3018 .dbg_all_data = &qed_dbg_all_data, 3019 .dbg_all_data_size = &qed_dbg_all_data_size, 3020 .chain_alloc = &qed_chain_alloc, 3021 .chain_free = &qed_chain_free, 3022 .nvm_flash = &qed_nvm_flash, 3023 .nvm_get_image = &qed_nvm_get_image, 3024 .set_coalesce = &qed_set_coalesce, 3025 .set_led = &qed_set_led, 3026 .recovery_process = &qed_recovery_process, 3027 .recovery_prolog = &qed_recovery_prolog, 3028 .attn_clr_enable = &qed_int_attn_clr_enable, 3029 .update_drv_state = &qed_update_drv_state, 3030 .update_mac = &qed_update_mac, 3031 .update_mtu = &qed_update_mtu, 3032 .update_wol = &qed_update_wol, 3033 .db_recovery_add = &qed_db_recovery_add, 3034 .db_recovery_del = &qed_db_recovery_del, 3035 .read_module_eeprom = &qed_read_module_eeprom, 3036 .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, 3037 .read_nvm_cfg = &qed_nvm_flash_cfg_read, 3038 .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, 3039 .set_grc_config = &qed_set_grc_config, 3040 }; 3041 3042 void qed_get_protocol_stats(struct qed_dev *cdev, 3043 enum qed_mcp_protocol_type type, 3044 union qed_mcp_protocol_stats *stats) 3045 { 3046 struct qed_eth_stats eth_stats; 3047 3048 memset(stats, 0, sizeof(*stats)); 3049 3050 switch (type) { 3051 case QED_MCP_LAN_STATS: 3052 qed_get_vport_stats(cdev, &eth_stats); 3053 stats->lan_stats.ucast_rx_pkts = 3054 eth_stats.common.rx_ucast_pkts; 3055 stats->lan_stats.ucast_tx_pkts = 3056 eth_stats.common.tx_ucast_pkts; 3057 stats->lan_stats.fcs_err = -1; 3058 break; 3059 case QED_MCP_FCOE_STATS: 3060 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); 3061 break; 3062 case QED_MCP_ISCSI_STATS: 3063 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 3064 break; 3065 default: 3066 DP_VERBOSE(cdev, QED_MSG_SP, 3067 "Invalid protocol type = %d\n", type); 3068 return; 3069 } 3070 } 3071 3072 int qed_mfw_tlv_req(struct qed_hwfn *hwfn) 3073 { 3074 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 3075 "Scheduling slowpath task [Flag: %d]\n", 3076 QED_SLOWPATH_MFW_TLV_REQ); 3077 smp_mb__before_atomic(); 3078 set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); 3079 smp_mb__after_atomic(); 3080 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); 3081 3082 return 0; 3083 } 3084 3085 static void 3086 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) 3087 { 3088 struct qed_common_cb_ops *op = cdev->protocol_ops.common; 3089 struct qed_eth_stats_common *p_common; 3090 struct qed_generic_tlvs gen_tlvs; 3091 struct qed_eth_stats stats; 3092 int i; 3093 3094 memset(&gen_tlvs, 0, sizeof(gen_tlvs)); 3095 op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); 3096 3097 if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) 3098 tlv->flags.ipv4_csum_offload = true; 3099 if (gen_tlvs.feat_flags & QED_TLV_LSO) 3100 tlv->flags.lso_supported = true; 3101 tlv->flags.b_set = true; 3102 3103 for (i = 0; i < QED_TLV_MAC_COUNT; i++) { 3104 if (is_valid_ether_addr(gen_tlvs.mac[i])) { 3105 ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); 3106 tlv->mac_set[i] = true; 3107 } 3108 } 3109 3110 qed_get_vport_stats(cdev, &stats); 3111 p_common = &stats.common; 3112 tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 3113 p_common->rx_bcast_pkts; 3114 tlv->rx_frames_set = true; 3115 tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + 3116 p_common->rx_bcast_bytes; 3117 tlv->rx_bytes_set = true; 3118 tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + 3119 p_common->tx_bcast_pkts; 3120 tlv->tx_frames_set = true; 3121 tlv->tx_bytes
= p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + 3122 p_common->tx_bcast_bytes; 3123 tlv->tx_bytes_set = true; 3124 } 3125 3126 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, 3127 union qed_mfw_tlv_data *tlv_buf) 3128 { 3129 struct qed_dev *cdev = hwfn->cdev; 3130 struct qed_common_cb_ops *ops; 3131 3132 ops = cdev->protocol_ops.common; 3133 if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { 3134 DP_NOTICE(hwfn, "Can't collect TLV management info\n"); 3135 return -EINVAL; 3136 } 3137 3138 switch (type) { 3139 case QED_MFW_TLV_GENERIC: 3140 qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); 3141 break; 3142 case QED_MFW_TLV_ETH: 3143 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); 3144 break; 3145 case QED_MFW_TLV_FCOE: 3146 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); 3147 break; 3148 case QED_MFW_TLV_ISCSI: 3149 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); 3150 break; 3151 default: 3152 break; 3153 } 3154 3155 return 0; 3156 } 3157
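/* Illustrative sketch, not part of the driver: a minimal example of how a
 * protocol driver might populate the qed_common_cb_ops callbacks consumed
 * above -- qed_link_update() invokes op->link_update() and
 * qed_fill_generic_tlv_data() invokes op->get_generic_tlv_data(), passing
 * back the cookie registered as cdev->ops_cookie.  The example_* names and
 * struct example_dev are hypothetical; the member names of
 * qed_common_cb_ops, qed_link_output and qed_generic_tlvs are assumed to
 * match include/linux/qed/qed_if.h and should be verified against the
 * header actually in use.
 */
#if 0	/* example only, not compiled */
struct example_dev {
	struct net_device *ndev;
};

static void example_link_update(void *dev, struct qed_link_output *link)
{
	struct example_dev *edev = dev;

	/* Mirror the link state reported by qed_fill_link()/qed_link_update() */
	if (link->link_up)
		netif_carrier_on(edev->ndev);
	else
		netif_carrier_off(edev->ndev);
}

static void example_get_generic_tlv_data(void *dev,
					 struct qed_generic_tlvs *data)
{
	struct example_dev *edev = dev;

	/* Answer the MFW TLV request serviced by qed_fill_generic_tlv_data() */
	data->feat_flags = QED_TLV_IP_CSUM | QED_TLV_LSO;
	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
}

static struct qed_common_cb_ops example_cb_ops = {
	.link_update = example_link_update,
	.get_generic_tlv_data = example_get_generic_tlv_data,
};
#endif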