// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	pci_disable_pcie_error_reporting(pdev);

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff
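
/* A config-space read from a PCI function that has been removed or is in a
 * fatal error state generally returns all-ones, so a revision ID of 0xff is
 * treated below as "device no longer responding" before probing continues.
 */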

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	/* AER (Advanced Error reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc)
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "Failed to configure PCIe AER [%d]\n", rc);

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
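
/* BAR layout assumed by qed_init_pci() above: BAR 0 carries the register
 * window mapped into cdev->regview, and BAR 2 carries the doorbell space
 * mapped write-combined into cdev->doorbells. A VF may legitimately lack
 * the doorbell BAR, which is why a zero-sized BAR 2 is fatal only for a PF.
 */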

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

struct qed_devlink {
	struct qed_dev *cdev;
};

enum qed_devlink_param_id {
	QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	QED_DEVLINK_PARAM_ID_IWARP_CMT,
};

static int qed_dl_param_get(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	ctx->val.vbool = cdev->iwarp_cmt;

	return 0;
}

static int qed_dl_param_set(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	cdev->iwarp_cmt = ctx->val.vbool;

	return 0;
}

static const struct devlink_param qed_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
			     "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     qed_dl_param_get, qed_dl_param_set, NULL),
};

static const struct devlink_ops qed_dl_ops;
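
/* The "iwarp_cmt" knob above is a driver-specific runtime devlink parameter.
 * From user space it would be toggled along the lines of (the PCI address is
 * only an example):
 *
 *   devlink dev param set pci/0000:03:00.0 \
 *           name iwarp_cmt value true cmode runtime
 */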

static int qed_devlink_register(struct qed_dev *cdev)
{
	union devlink_param_value value;
	struct qed_devlink *qed_dl;
	struct devlink *dl;
	int rc;

	dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
	if (!dl)
		return -ENOMEM;

	qed_dl = devlink_priv(dl);

	cdev->dl = dl;
	qed_dl->cdev = cdev;

	rc = devlink_register(dl, &cdev->pdev->dev);
	if (rc)
		goto err_free;

	rc = devlink_params_register(dl, qed_devlink_params,
				     ARRAY_SIZE(qed_devlink_params));
	if (rc)
		goto err_unregister;

	value.vbool = false;
	devlink_param_driverinit_value_set(dl,
					   QED_DEVLINK_PARAM_ID_IWARP_CMT,
					   value);

	devlink_params_publish(dl);
	cdev->iwarp_cmt = false;

	return 0;

err_unregister:
	devlink_unregister(dl);

err_free:
	cdev->dl = NULL;
	devlink_free(dl);

	return rc;
}

static void qed_devlink_unregister(struct qed_dev *cdev)
{
	if (!cdev->dl)
		return;

	devlink_params_unregister(cdev->dl, qed_devlink_params,
				  ARRAY_SIZE(qed_devlink_params));

	devlink_unregister(cdev->dl);
	devlink_free(cdev->dl);
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_devlink_register(cdev);
	if (rc) {
		DP_INFO(cdev, "Failed to register devlink.\n");
		goto err2;
	}

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_devlink_unregister(cdev);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
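
/* qed_enable_msix() below first requests the full vector count and, if the
 * kernel grants fewer, retries with the grant rounded down to a multiple of
 * the hwfn count so every hwfn keeps an equal share; e.g. on a two-hwfn
 * (CMT) device a partial grant of 9 vectors is retried as a request for 8.
 */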

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
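
/* Interrupt-mode selection degrades gracefully: MSI-X is attempted first,
 * then MSI (single-hwfn devices only), then legacy INTA. With 'force_mode'
 * set, a failure of the requested mode is returned to the caller instead of
 * falling through to the next one.
 */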

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
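
/* Worked example for qed_set_int_fp() above: in INTA/MSI mode each hwfn can
 * serve at most 63 fastpath status blocks (bit 0 of the per-hwfn SISR status
 * in qed_single_int() is the slowpath line, leaving 63 fastpath bits), so a
 * two-hwfn device caps the request at 126.
 */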

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
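
/* A sketch of the vector accounting in qed_slowpath_setup_int() above,
 * assuming a two-hwfn PF with 16 status blocks per hwfn: num_vectors =
 * 2 * (16 + 1) = 34 and min_msix_cnt = 4; if all 34 vectors are granted,
 * fp_msix_base = 2 and fp_msix_cnt = 32 (less any share carved out for
 * RDMA afterwards).
 */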

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
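
/* Status-block indexing used by qed_sb_init()/qed_sb_release() below:
 * L2 queues interleave across engines in CMT mode, so global sb_id N maps
 * to hwfn (N % num_hwfns) with relative index (N / num_hwfns); e.g. sb_id 5
 * on a two-hwfn device lands on hwfn 1 as relative SB 2. Other protocols
 * stay on the affinitized hwfn and use sb_id as-is.
 */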

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}
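
/* qed_set_link() below translates the ethtool-style link-mode bitmap in
 * 'params' into the MFW's NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_*
 * advertised-speed mask: for each speed bucket it builds a scratch mask of
 * all link modes of that speed and checks whether the request intersects it.
 */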

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
	struct qed_mcp_link_params *link_params;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	u32 as;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params) {
		qed_ptt_release(hwfn, ptt);
		return -ENODATA;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		as = 0;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 1000baseT_Full);
		phylink_set(sup_caps, 1000baseKX_Full);
		phylink_set(sup_caps, 1000baseX_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 10000baseT_Full);
		phylink_set(sup_caps, 10000baseKR_Full);
		phylink_set(sup_caps, 10000baseKX4_Full);
		phylink_set(sup_caps, 10000baseR_FEC);
		phylink_set(sup_caps, 10000baseCR_Full);
		phylink_set(sup_caps, 10000baseSR_Full);
		phylink_set(sup_caps, 10000baseLR_Full);
		phylink_set(sup_caps, 10000baseLRM_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 20000baseKR2_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 25000baseKR_Full);
		phylink_set(sup_caps, 25000baseCR_Full);
		phylink_set(sup_caps, 25000baseSR_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 40000baseLR4_Full);
		phylink_set(sup_caps, 40000baseKR4_Full);
		phylink_set(sup_caps, 40000baseCR4_Full);
		phylink_set(sup_caps, 40000baseSR4_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 50000baseKR2_Full);
		phylink_set(sup_caps, 50000baseCR2_Full);
		phylink_set(sup_caps, 50000baseSR2_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 100000baseKR4_Full);
		phylink_set(sup_caps, 100000baseSR4_Full);
		phylink_set(sup_caps, 100000baseCR4_Full);
		phylink_set(sup_caps, 100000baseLR4_ER4_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;

		link_params->speed.advertised_speeds = as;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
			break;
		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
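
/* Translation of MFW media types into the ethtool PORT_* namespace; e.g. a
 * direct-attach copper (MEDIA_DA_TWINAX) module is reported as PORT_DA,
 * while anything unrecognized falls back to PORT_OTHER.
 */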

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}
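
/* qed_fill_link_capability() below widens 'capability' with the speed mask
 * read from the transceiver for DAC and fiber media, since such modules may
 * support several rates; the transceiver type then selects the concrete
 * link modes (e.g. SR vs. LR variants) to report.
 */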

static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			default:
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
{
	*speed_mask = 0;

	if (caps &
	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
	if (caps & QED_LINK_PARTNER_SPEED_10G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
	if (caps & QED_LINK_PARTNER_SPEED_20G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
	if (caps & QED_LINK_PARTNER_SPEED_25G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
	if (caps & QED_LINK_PARTNER_SPEED_40G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
	if (caps & QED_LINK_PARTNER_SPEED_50G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
	if (caps & QED_LINK_PARTNER_SPEED_100G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
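
/* qed_fill_link() below gathers params/state/capabilities from the MFW (or,
 * for a VF, from the bulletin board) and then reuses
 * qed_fill_link_capability() three times: for the advertised mask, the
 * supported mask, and the link-partner mask derived just above by
 * qed_lp_caps_to_speed_mask().
 */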

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if (link_caps.default_speed_autoneg)
		phylink_set(if_link->supported_caps, Autoneg);
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		phylink_set(if_link->supported_caps, Pause);

	linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

	if (params.speed.autoneg)
		phylink_set(if_link->advertised_caps, Autoneg);
	else
		phylink_clear(if_link->advertised_caps, Autoneg);

	if_link->sup_fec = link_caps.fec_default;
	if_link->active_fec = params.fec;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 if_link->advertised_caps);

	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 if_link->supported_caps);

	/* Fill partner advertised capability */
	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	if (link.an_complete)
		phylink_set(if_link->lp_caps, Autoneg);
	if (link.partner_adv_pause)
		phylink_set(if_link->lp_caps, Pause);
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		phylink_set(if_link->lp_caps, Asym_Pause);

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;

	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
		op->bw_update(cookie);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}

static int qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(nvm_image->length - 4, 4));

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
	*crc = (__force u32)cpu_to_be32p(crc);

out:
	kfree(buf);

	return rc;
}
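
/* Example (illustrative only, not part of the driver): the CRC convention
 * used above, expressed as a self-contained helper.  Assuming "image"
 * holds the raw NVM image words and "len" includes the trailing 4 CRC
 * bytes, the expected value could be reproduced with:
 *
 *	static u32 qed_example_image_crc(u8 *image, u32 len)
 *	{
 *		u32 crc;
 *
 *		// Byte-swap to big-endian in place, excluding the CRC word
 *		cpu_to_be32_array((__be32 *)image, (const u32 *)image,
 *				  DIV_ROUND_UP(len - 4, 4));
 *		crc = ~crc32(~0U, image, len - 4);
 *		return (__force u32)cpu_to_be32(crc);
 *	}
 *
 * "qed_example_image_crc" is a hypothetical name used only for this sketch.
 */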

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                        |
 * 4B  | image_type    | Options       |  Number of register settings     |
 * 8B  |                       Value                                      |
 * 12B |                       Mask                                       |
 * 16B |                       Offset                                     |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}
exit:
	return rc;
}
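
/* Example (illustrative only): each register-setting record consumed by the
 * loop above can be pictured as the following hypothetical packed layout;
 * the type and field names are invented for this sketch and are read as
 * native-endian 32-bit words, matching the parser:
 *
 *	struct qed_example_nvm_change_set {
 *		u32 value;	// new bits, qualified by mask
 *		u32 mask;	// which bits of the word to touch
 *		u32 offset;	// byte offset inside the NVM image
 *	} __packed;
 *
 * Each record is applied read-modify-write as
 * new = (value & mask) | (current & ~mask).
 */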

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                        |
 * 4B  | b'0: check_response?  | b'1-31  reserved                         |
 * 8B  | File-type |                    reserved                          |
 * 12B |                    Image length in bytes                         |
 * \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                        |
 * 4B  |                       Length in bytes                            |
 * 8B  | b'0: check_response?  | b'1-31  reserved                         |
 * 12B |                       Offset in bytes                            |
 * 16B |                       Data ...                                   |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                          |
 * 4B  |                       Length in bytes                            |
 * 8B  | Highest command in this batchfile |          Reserved            |
 * \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
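
/* Example (illustrative only): the 12-byte general header checked above,
 * sketched as a hypothetical packed structure; the type name is invented
 * for this sketch and does not exist in the driver:
 *
 *	struct qed_example_nvm_hdr {
 *		u32 signature;	// compared against QED_NVM_SIGNATURE
 *		u32 len;	// must equal the firmware image size
 *		u16 max_cmd;	// highest command index in the batchfile,
 *				// must be below QED_NVM_FLASH_CMD_NVM_MAX
 *		u16 reserved;
 *	} __packed;
 */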

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x5 [command index]                        |
 * 4B  | Number of config attributes     |          Reserved              |
 * 4B  | Config ID                       | Entity ID      | Length        |
 * 4B  | Value                                                            |
 *     |                                                                  |
 * \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}

#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}
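
/* Example (illustrative only): one config attribute record consumed per
 * iteration of the qed_nvm_flash_cfg_write() loop above, as a hypothetical
 * packed layout (names invented for this sketch):
 *
 *	struct qed_example_nvm_cfg_attr {
 *		u16 cfg_id;	// NVM config option ID
 *		u8 entity_id;	// non-zero selects a specific entity
 *		u8 len;		// value length; the parser copies it into a
 *				// 32-byte staging buffer, so values are
 *				// expected to fit within that bound
 *		u8 value[];	// 'len' bytes of option value
 *	} __packed;
 */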

static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}
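
/* Example (illustrative only): qed_nvm_flash() is exported through
 * qed_common_ops below and is typically reached from a protocol driver's
 * ethtool flash-device path, roughly:
 *
 *	rc = edev->ops->common->nvm_flash(edev->cdev, fw_file_name);
 *
 * where "edev" stands for a hypothetical protocol-driver context and
 * "fw_file_name" names a batchfile under /lib/firmware.
 */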

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

static const char * const qed_hw_err_type_descr[] = {
	[QED_HW_ERR_FAN_FAIL] = "Fan Failure",
	[QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
	[QED_HW_ERR_HW_ATTN] = "HW Attention",
	[QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
	[QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
	[QED_HW_ERR_FW_ASSERT] = "FW Assertion",
	[QED_HW_ERR_LAST] = "Unknown",
};

void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;
	const char *err_str;

	if (err_type > QED_HW_ERR_LAST)
		err_type = QED_HW_ERR_LAST;
	err_str = qed_hw_err_type_descr[err_type];

	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);

	/* Call the HW error handler of the protocol driver.
	 * If it is not available - perform a minimal handling of preventing
	 * HW attentions from being reasserted.
	 */
	if (ops && ops->schedule_hw_err_handler)
		ops->schedule_hw_err_handler(cookie, err_type);
	else
		qed_int_attn_clr_enable(p_hwfn->cdev, true);
}
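
/* Example (illustrative only): a minimal schedule_hw_err_handler callback
 * as a protocol driver might wire it into struct qed_common_cb_ops.  The
 * context type, field names and the deferral to a workqueue are
 * assumptions of this sketch, not driver requirements:
 *
 *	static void example_schedule_hw_err_handler(void *dev,
 *						    enum qed_hw_err_type err)
 *	{
 *		struct example_dev *edev = dev;	// hypothetical context
 *
 *		edev->last_err_type = err;
 *		schedule_work(&edev->err_work);	// handle outside IRQ context
 *	}
 */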

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};
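
/* Example (illustrative only): how a protocol driver could invoke one of
 * the selftest callbacks exported above through the common ops; "edev"
 * again stands for a hypothetical protocol-driver context holding the qed
 * ops and cdev pointers:
 *
 *	if (edev->ops->common->selftest->selftest_memory(edev->cdev))
 *		; // memory selftest failed, report it via ethtool
 */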

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}
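
/* The slowpath worker scheduled by qed_mfw_tlv_req() ultimately lands in
 * qed_mfw_fill_tlv_data() below, which dispatches either to the generic
 * filler above or to the protocol driver's TLV callbacks, depending on the
 * TLV type requested by the MFW.
 */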
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}