// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
	u32		mfw_val;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

	const u32	*cap_arr;
	u32		arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}

static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};

static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};

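/* The __initconst link-mode arrays above are folded into the speed maps'
 * linkmode bitmasks once at module init; the array pointers are cleared
 * afterwards since the init memory they reference is discarded after boot.
 */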
static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

	map->cap_arr = NULL;
	map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	pci_disable_pcie_error_reporting(pdev);

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	/* AER (Advanced Error reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc)
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "Failed to configure PCIe AER [%d]\n", rc);

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

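/* Fill the qed_dev_info handed to protocol drivers: tunnel offload state,
 * BAR layout, FW/MFW versions and leading-hwfn attributes.
 */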
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;
	cdev->common_dev_info = *dev_info;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

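/* On CMT devices the enabled vector count must stay a multiple of the
 * number of hwfns; if pci_enable_msix_range() grants fewer vectors than
 * requested, round down to such a multiple and retry with an exact count.
 */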
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

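/* Fastpath vector indices are interleaved across engines on CMT devices:
 * (index % num_hwfns) selects the hwfn, (index / num_hwfns) the slot
 * within that hwfn's simd_proto_handler table.
 */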
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

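/* Single-IRQ (INTa/MSI) dispatch: bit 0 of the IGU SISR status signals the
 * slowpath DPC, while the remaining bits (tested as 0x2ULL << j) map to the
 * per-protocol fastpath handlers registered above.
 */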
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

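/* Clamp the requested fastpath vector count: in MSI-X mode the limit is the
 * number of fastpath MSI-X vectors actually enabled; otherwise it matches
 * the 63 fastpath status bits served per hwfn by the single-IRQ handler.
 */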
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++;	/* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

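/* Inflate a zlib-compressed firmware buffer into unzip_buf using the
 * per-hwfn stream allocated below; returns the decompressed size in
 * dwords, or 0 on any zlib error.
 */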
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

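/* Doorbell recovery is re-armed every 100 ms; once started via
 * qed_periodic_db_rec_start() it runs for up to QED_PERIODIC_DB_REC_COUNT
 * iterations before the counter in qed_slowpath_task() expires.
 */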
#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}

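/* Main load path: start the IOV and slowpath workqueues, request firmware
 * (PF only), allocate resources, set up interrupts and bring up the HW
 * with all tunnel classification modes enabled by default.
 */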
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

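/* Translate generic qed_link_params into the MFW extended-speed ABI:
 * advertised ethtool link modes are folded back into MFW speed-mask bits,
 * and a single forced ethtool speed may select several extended-speed
 * variants (e.g. 50G covers both BASE-R and BASE-R2).
 */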
static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
				     const struct qed_link_params *params)
{
	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
	const struct qed_mfw_speed_map *map;
	u32 i;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		ext_speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		ext_speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
			map = qed_mfw_ext_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				ext_speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
		switch (params->forced_speed) {
		case SPEED_1000:
			ext_speed->forced_speed = QED_EXT_SPEED_1G;
			break;
		case SPEED_10000:
			ext_speed->forced_speed = QED_EXT_SPEED_10G;
			break;
		case SPEED_20000:
			ext_speed->forced_speed = QED_EXT_SPEED_20G;
			break;
		case SPEED_25000:
			ext_speed->forced_speed = QED_EXT_SPEED_25G;
			break;
		case SPEED_40000:
			ext_speed->forced_speed = QED_EXT_SPEED_40G;
			break;
		case SPEED_50000:
			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
						  QED_EXT_SPEED_50G_R2;
			break;
		case SPEED_100000:
			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
						  QED_EXT_SPEED_100G_R4 |
						  QED_EXT_SPEED_100G_P4;
			break;
		default:
			break;
		}
	}

	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
		return;

	switch (params->forced_speed) {
	case SPEED_25000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
						    ETH_EXT_FEC_25G_BASE_R |
						    ETH_EXT_FEC_25G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_40000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
						    ETH_EXT_FEC_40G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_50000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
						    ETH_EXT_FEC_50G_BASE_R |
						    ETH_EXT_FEC_50G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_100000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
						    ETH_EXT_FEC_100G_BASE_R |
						    ETH_EXT_FEC_100G_NONE;
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_mcp_link_params *link_params;
	struct qed_mcp_link_speed_params *speed;
	const struct qed_mfw_speed_map *map;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int rc;
	u32 i;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params)
		return -ENODATA;

	speed = &link_params->speed;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
			map = qed_mfw_legacy_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		speed->forced_speed = params->forced_speed;

	if (qed_mcp_is_ext_speed_supported(hwfn))
		qed_set_ext_speed_params(link_params, params);

	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
			break;
		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_KR:
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

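/* Snapshot the current MFW link params/state/capabilities; VFs read the
 * values cached from the PF bulletin instead of querying the MFW.
 */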
static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			default:
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
{
	*speed_mask = 0;

	if (caps &
	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
	if (caps & QED_LINK_PARTNER_SPEED_10G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
	if (caps & QED_LINK_PARTNER_SPEED_20G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
	if (caps & QED_LINK_PARTNER_SPEED_25G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
	if (caps & QED_LINK_PARTNER_SPEED_40G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
	if (caps & QED_LINK_PARTNER_SPEED_50G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
	if (caps & QED_LINK_PARTNER_SPEED_100G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}

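/* Fill @if_link with the current link configuration and state as reported
 * by the MFW: supported/advertised/link-partner capabilities, speed,
 * duplex, pause and EEE settings.
 */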
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
		if (link_caps.default_ext_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.ext_speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);

		qed_fill_link_capability(hwfn, ptt,
					 params.ext_speed.advertised_speeds,
					 if_link->advertised_caps);
	} else {
		if (link_caps.default_speed_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);
	}

	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		phylink_set(if_link->supported_caps, Pause);

	if_link->sup_fec = link_caps.fec_default;
	if_link->active_fec = params.fec;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 if_link->advertised_caps);

	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 if_link->supported_caps);

	/* Fill partner advertised capability */
	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	if (link.an_complete)
		phylink_set(if_link->lp_caps, Autoneg);
	if (link.partner_adv_pause)
		phylink_set(if_link->lp_caps, Pause);
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		phylink_set(if_link->lp_caps, Asym_Pause);

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}

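/* Query the current link on the leading hwfn and propagate the result to
 * the VFs. A PF needs a PTT window for this; a VF obtains its link data
 * from the PF and passes a NULL PTT.
 */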
static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;

	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
		op->bw_update(cookie);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}

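/* Compute the CRC32 the MFW expects for an NVM image: read the image into
 * a temporary buffer, convert it to big endian (excluding the trailing
 * 4 CRC bytes) and run an inverted crc32 over the result.
 */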
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(nvm_image->length - 4, 4));

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
	*crc = (__force u32)cpu_to_be32p(crc);

out:
	kfree(buf);

	return rc;
}

/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                            |
 * 4B  | image_type  |  Options  |  Number of register settings              |
 * 8B  |                       Value                                          |
 * 12B |                       Mask                                           |
 * 16B |                       Offset                                         |
 *     \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}
exit:
	return rc;
}

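/* The two handlers below implement the MFW "put file" flow: a FILE_START
 * command (QED_PUT_FILE_BEGIN) opens the transfer, after which one or more
 * FILE_DATA chunks (QED_PUT_FILE_DATA) are written at the offsets given in
 * the batch file.
 */
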
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                            |
 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
 * 8B  | File-type |                   reserved                               |
 * 12B |                    Image length in bytes                             |
 *     \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}

/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                            |
 * 4B  |                       Length in bytes                                |
 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
 * 12B |                       Offset in bytes                                |
 * 16B |                       Data ...                                       |
 *     \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

/* Binary file format [General header] -
 *     /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                              |
 * 4B  |                       Length in bytes                                |
 * 8B  | Highest command in this batchfile |          Reserved                |
 *     \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure the driver is familiar with all commands necessary
	 * for this batch file.
	 */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}

/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x5 [command index]                            |
 * 4B  | Number of config attributes     |          Reserved                  |
 * 4B  | Config ID                       | Entity ID      | Length            |
 * 4B  | Value                                                                |
 *     |                                                                      |
 *     \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using the Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}

#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}

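/* Read a single NVM config attribute into @data. PF-scoped attributes
 * (non-zero @entity_id) are read with the per-PF flags, everything else
 * with the default flags.
 */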
static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

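/* Top-level NVM flash entry point: request the firmware blob @name,
 * validate its general header, then execute each command in the batch
 * file, optionally checking the MCP response after every command.
 */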
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

static const char * const qed_hw_err_type_descr[] = {
	[QED_HW_ERR_FAN_FAIL] = "Fan Failure",
	[QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
	[QED_HW_ERR_HW_ATTN] = "HW Attention",
	[QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
	[QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
	[QED_HW_ERR_FW_ASSERT] = "FW Assertion",
	[QED_HW_ERR_LAST] = "Unknown",
};

void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;
	const char *err_str;

	if (err_type > QED_HW_ERR_LAST)
		err_type = QED_HW_ERR_LAST;
	err_str = qed_hw_err_type_descr[err_type];

	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);

	/* Call the HW error handler of the protocol driver.
	 * If it is not available - perform a minimal handling of preventing
	 * HW attentions from being reasserted.
	 */
	if (ops && ops->schedule_hw_err_handler)
		ops->schedule_hw_err_handler(cookie, err_type);
	else
		qed_int_attn_clr_enable(p_hwfn->cdev, true);
}

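/* Most of the wrappers below expose a single management-FW operation to
 * the protocol drivers: acquire a PTT window, issue one MCP request and
 * release the window again.
 */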
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}

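/* Operation tables handed to the upper-layer (protocol) drivers. */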
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.devlink_register = qed_devlink_register,
	.devlink_unregister = qed_devlink_unregister,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

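/* Kick the slowpath workqueue so it services an MFW TLV request. */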
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}