1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7 #include <linux/stddef.h> 8 #include <linux/pci.h> 9 #include <linux/kernel.h> 10 #include <linux/slab.h> 11 #include <linux/delay.h> 12 #include <asm/byteorder.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/string.h> 15 #include <linux/module.h> 16 #include <linux/interrupt.h> 17 #include <linux/workqueue.h> 18 #include <linux/ethtool.h> 19 #include <linux/etherdevice.h> 20 #include <linux/vmalloc.h> 21 #include <linux/crash_dump.h> 22 #include <linux/crc32.h> 23 #include <linux/qed/qed_if.h> 24 #include <linux/qed/qed_ll2_if.h> 25 #include <net/devlink.h> 26 #include <linux/aer.h> 27 28 #include "qed.h" 29 #include "qed_sriov.h" 30 #include "qed_sp.h" 31 #include "qed_dev_api.h" 32 #include "qed_ll2.h" 33 #include "qed_fcoe.h" 34 #include "qed_iscsi.h" 35 36 #include "qed_mcp.h" 37 #include "qed_reg_addr.h" 38 #include "qed_hw.h" 39 #include "qed_selftest.h" 40 #include "qed_debug.h" 41 42 #define QED_ROCE_QPS (8192) 43 #define QED_ROCE_DPIS (8) 44 #define QED_RDMA_SRQS QED_ROCE_QPS 45 #define QED_NVM_CFG_GET_FLAGS 0xA 46 #define QED_NVM_CFG_GET_PF_FLAGS 0x1A 47 #define QED_NVM_CFG_MAX_ATTRS 50 48 49 static char version[] = 50 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 51 52 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); 53 MODULE_LICENSE("GPL"); 54 MODULE_VERSION(DRV_MODULE_VERSION); 55 56 #define FW_FILE_VERSION \ 57 __stringify(FW_MAJOR_VERSION) "." \ 58 __stringify(FW_MINOR_VERSION) "." \ 59 __stringify(FW_REVISION_VERSION) "." \ 60 __stringify(FW_ENGINEERING_VERSION) 61 62 #define QED_FW_FILE_NAME \ 63 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" 64 65 MODULE_FIRMWARE(QED_FW_FILE_NAME); 66 67 static int __init qed_init(void) 68 { 69 pr_info("%s", version); 70 71 return 0; 72 } 73 74 static void __exit qed_cleanup(void) 75 { 76 pr_notice("qed_cleanup called\n"); 77 } 78 79 module_init(qed_init); 80 module_exit(qed_cleanup); 81 82 /* Check if the DMA controller on the machine can properly handle the DMA 83 * addressing required by the device. 84 */ 85 static int qed_set_coherency_mask(struct qed_dev *cdev) 86 { 87 struct device *dev = &cdev->pdev->dev; 88 89 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 90 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 91 DP_NOTICE(cdev, 92 "Can't request 64-bit consistent allocations\n"); 93 return -EIO; 94 } 95 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 96 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); 97 return -EIO; 98 } 99 100 return 0; 101 } 102 103 static void qed_free_pci(struct qed_dev *cdev) 104 { 105 struct pci_dev *pdev = cdev->pdev; 106 107 pci_disable_pcie_error_reporting(pdev); 108 109 if (cdev->doorbells && cdev->db_size) 110 iounmap(cdev->doorbells); 111 if (cdev->regview) 112 iounmap(cdev->regview); 113 if (atomic_read(&pdev->enable_cnt) == 1) 114 pci_release_regions(pdev); 115 116 pci_disable_device(pdev); 117 } 118 119 #define PCI_REVISION_ID_ERROR_VAL 0xff 120 121 /* Performs PCI initializations as well as initializing PCI-related parameters 122 * in the device structrue. Returns 0 in case of success. 123 */ 124 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) 125 { 126 u8 rev_id; 127 int rc; 128 129 cdev->pdev = pdev; 130 131 rc = pci_enable_device(pdev); 132 if (rc) { 133 DP_NOTICE(cdev, "Cannot enable PCI device\n"); 134 goto err0; 135 } 136 137 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 138 DP_NOTICE(cdev, "No memory region found in bar #0\n"); 139 rc = -EIO; 140 goto err1; 141 } 142 143 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 144 DP_NOTICE(cdev, "No memory region found in bar #2\n"); 145 rc = -EIO; 146 goto err1; 147 } 148 149 if (atomic_read(&pdev->enable_cnt) == 1) { 150 rc = pci_request_regions(pdev, "qed"); 151 if (rc) { 152 DP_NOTICE(cdev, 153 "Failed to request PCI memory resources\n"); 154 goto err1; 155 } 156 pci_set_master(pdev); 157 pci_save_state(pdev); 158 } 159 160 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); 161 if (rev_id == PCI_REVISION_ID_ERROR_VAL) { 162 DP_NOTICE(cdev, 163 "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n", 164 rev_id); 165 rc = -ENODEV; 166 goto err2; 167 } 168 if (!pci_is_pcie(pdev)) { 169 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 170 rc = -EIO; 171 goto err2; 172 } 173 174 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 175 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 176 DP_NOTICE(cdev, "Cannot find power management capability\n"); 177 178 rc = qed_set_coherency_mask(cdev); 179 if (rc) 180 goto err2; 181 182 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 183 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 184 cdev->pci_params.irq = pdev->irq; 185 186 cdev->regview = pci_ioremap_bar(pdev, 0); 187 if (!cdev->regview) { 188 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 189 rc = -ENOMEM; 190 goto err2; 191 } 192 193 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 194 cdev->db_size = pci_resource_len(cdev->pdev, 2); 195 if (!cdev->db_size) { 196 if (IS_PF(cdev)) { 197 DP_NOTICE(cdev, "No Doorbell bar available\n"); 198 return -EINVAL; 199 } else { 200 return 0; 201 } 202 } 203 204 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 205 206 if (!cdev->doorbells) { 207 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 208 return -ENOMEM; 209 } 210 211 /* AER (Advanced Error reporting) configuration */ 212 rc = pci_enable_pcie_error_reporting(pdev); 213 if (rc) 214 DP_VERBOSE(cdev, NETIF_MSG_DRV, 215 "Failed to configure PCIe AER [%d]\n", rc); 216 217 return 0; 218 219 err2: 220 pci_release_regions(pdev); 221 err1: 222 pci_disable_device(pdev); 223 err0: 224 return rc; 225 } 226 227 int qed_fill_dev_info(struct qed_dev *cdev, 228 struct qed_dev_info *dev_info) 229 { 230 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 231 struct qed_hw_info *hw_info = &p_hwfn->hw_info; 232 struct qed_tunnel_info *tun = &cdev->tunnel; 233 struct qed_ptt *ptt; 234 235 memset(dev_info, 0, sizeof(struct qed_dev_info)); 236 237 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 238 tun->vxlan.b_mode_enabled) 239 dev_info->vxlan_enable = true; 240 241 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 242 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 243 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 244 dev_info->gre_enable = true; 245 246 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 247 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 248 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 249 dev_info->geneve_enable = true; 250 251 dev_info->num_hwfns = cdev->num_hwfns; 252 dev_info->pci_mem_start = cdev->pci_params.mem_start; 253 dev_info->pci_mem_end = cdev->pci_params.mem_end; 254 dev_info->pci_irq = cdev->pci_params.irq; 255 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 256 dev_info->dev_type = cdev->type; 257 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 258 259 if (IS_PF(cdev)) { 260 dev_info->fw_major = FW_MAJOR_VERSION; 261 dev_info->fw_minor = FW_MINOR_VERSION; 262 dev_info->fw_rev = FW_REVISION_VERSION; 263 dev_info->fw_eng = FW_ENGINEERING_VERSION; 264 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 265 &cdev->mf_bits); 266 dev_info->tx_switching = true; 267 268 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) 269 dev_info->wol_support = true; 270 271 dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); 272 273 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; 274 } else { 275 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, 276 &dev_info->fw_minor, &dev_info->fw_rev, 277 &dev_info->fw_eng); 278 } 279 280 if (IS_PF(cdev)) { 281 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 282 if (ptt) { 283 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 284 &dev_info->mfw_rev, NULL); 285 286 qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 287 &dev_info->mbi_version); 288 289 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 290 &dev_info->flash_size); 291 292 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 293 } 294 } else { 295 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 296 &dev_info->mfw_rev, NULL); 297 } 298 299 dev_info->mtu = hw_info->mtu; 300 301 return 0; 302 } 303 304 static void qed_free_cdev(struct qed_dev *cdev) 305 { 306 kfree((void *)cdev); 307 } 308 309 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 310 { 311 struct qed_dev *cdev; 312 313 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 314 if (!cdev) 315 return cdev; 316 317 qed_init_struct(cdev); 318 319 return cdev; 320 } 321 322 /* Sets the requested power state */ 323 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 324 { 325 if (!cdev) 326 return -ENODEV; 327 328 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 329 return 0; 330 } 331 332 struct qed_devlink { 333 struct qed_dev *cdev; 334 }; 335 336 enum qed_devlink_param_id { 337 QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, 338 QED_DEVLINK_PARAM_ID_IWARP_CMT, 339 }; 340 341 static int qed_dl_param_get(struct devlink *dl, u32 id, 342 struct devlink_param_gset_ctx *ctx) 343 { 344 struct qed_devlink *qed_dl; 345 struct qed_dev *cdev; 346 347 qed_dl = devlink_priv(dl); 348 cdev = qed_dl->cdev; 349 ctx->val.vbool = cdev->iwarp_cmt; 350 351 return 0; 352 } 353 354 static int qed_dl_param_set(struct devlink *dl, u32 id, 355 struct devlink_param_gset_ctx *ctx) 356 { 357 struct qed_devlink *qed_dl; 358 struct qed_dev *cdev; 359 360 qed_dl = devlink_priv(dl); 361 cdev = qed_dl->cdev; 362 cdev->iwarp_cmt = ctx->val.vbool; 363 364 return 0; 365 } 366 367 static const struct devlink_param qed_devlink_params[] = { 368 DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT, 369 "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL, 370 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 371 qed_dl_param_get, qed_dl_param_set, NULL), 372 }; 373 374 static const struct devlink_ops qed_dl_ops; 375 376 static int qed_devlink_register(struct qed_dev *cdev) 377 { 378 union devlink_param_value value; 379 struct qed_devlink *qed_dl; 380 struct devlink *dl; 381 int rc; 382 383 dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl)); 384 if (!dl) 385 return -ENOMEM; 386 387 qed_dl = devlink_priv(dl); 388 389 cdev->dl = dl; 390 qed_dl->cdev = cdev; 391 392 rc = devlink_register(dl, &cdev->pdev->dev); 393 if (rc) 394 goto err_free; 395 396 rc = devlink_params_register(dl, qed_devlink_params, 397 ARRAY_SIZE(qed_devlink_params)); 398 if (rc) 399 goto err_unregister; 400 401 value.vbool = false; 402 devlink_param_driverinit_value_set(dl, 403 QED_DEVLINK_PARAM_ID_IWARP_CMT, 404 value); 405 406 devlink_params_publish(dl); 407 cdev->iwarp_cmt = false; 408 409 return 0; 410 411 err_unregister: 412 devlink_unregister(dl); 413 414 err_free: 415 cdev->dl = NULL; 416 devlink_free(dl); 417 418 return rc; 419 } 420 421 static void qed_devlink_unregister(struct qed_dev *cdev) 422 { 423 if (!cdev->dl) 424 return; 425 426 devlink_params_unregister(cdev->dl, qed_devlink_params, 427 ARRAY_SIZE(qed_devlink_params)); 428 429 devlink_unregister(cdev->dl); 430 devlink_free(cdev->dl); 431 } 432 433 /* probing */ 434 static struct qed_dev *qed_probe(struct pci_dev *pdev, 435 struct qed_probe_params *params) 436 { 437 struct qed_dev *cdev; 438 int rc; 439 440 cdev = qed_alloc_cdev(pdev); 441 if (!cdev) 442 goto err0; 443 444 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 445 cdev->protocol = params->protocol; 446 447 if (params->is_vf) 448 cdev->b_is_vf = true; 449 450 qed_init_dp(cdev, params->dp_module, params->dp_level); 451 452 cdev->recov_in_prog = params->recov_in_prog; 453 454 rc = qed_init_pci(cdev, pdev); 455 if (rc) { 456 DP_ERR(cdev, "init pci failed\n"); 457 goto err1; 458 } 459 DP_INFO(cdev, "PCI init completed successfully\n"); 460 461 rc = qed_devlink_register(cdev); 462 if (rc) { 463 DP_INFO(cdev, "Failed to register devlink.\n"); 464 goto err2; 465 } 466 467 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 468 if (rc) { 469 DP_ERR(cdev, "hw prepare failed\n"); 470 goto err2; 471 } 472 473 DP_INFO(cdev, "qed_probe completed successfully\n"); 474 475 return cdev; 476 477 err2: 478 qed_free_pci(cdev); 479 err1: 480 qed_free_cdev(cdev); 481 err0: 482 return NULL; 483 } 484 485 static void qed_remove(struct qed_dev *cdev) 486 { 487 if (!cdev) 488 return; 489 490 qed_hw_remove(cdev); 491 492 qed_free_pci(cdev); 493 494 qed_set_power_state(cdev, PCI_D3hot); 495 496 qed_devlink_unregister(cdev); 497 498 qed_free_cdev(cdev); 499 } 500 501 static void qed_disable_msix(struct qed_dev *cdev) 502 { 503 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 504 pci_disable_msix(cdev->pdev); 505 kfree(cdev->int_params.msix_table); 506 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 507 pci_disable_msi(cdev->pdev); 508 } 509 510 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 511 } 512 513 static int qed_enable_msix(struct qed_dev *cdev, 514 struct qed_int_params *int_params) 515 { 516 int i, rc, cnt; 517 518 cnt = int_params->in.num_vectors; 519 520 for (i = 0; i < cnt; i++) 521 int_params->msix_table[i].entry = i; 522 523 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 524 int_params->in.min_msix_cnt, cnt); 525 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 526 (rc % cdev->num_hwfns)) { 527 pci_disable_msix(cdev->pdev); 528 529 /* If fastpath is initialized, we need at least one interrupt 530 * per hwfn [and the slow path interrupts]. New requested number 531 * should be a multiple of the number of hwfns. 532 */ 533 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 534 DP_NOTICE(cdev, 535 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 536 cnt, int_params->in.num_vectors); 537 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 538 cnt); 539 if (!rc) 540 rc = cnt; 541 } 542 543 if (rc > 0) { 544 /* MSI-x configuration was achieved */ 545 int_params->out.int_mode = QED_INT_MODE_MSIX; 546 int_params->out.num_vectors = rc; 547 rc = 0; 548 } else { 549 DP_NOTICE(cdev, 550 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", 551 cnt, rc); 552 } 553 554 return rc; 555 } 556 557 /* This function outputs the int mode and the number of enabled msix vector */ 558 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) 559 { 560 struct qed_int_params *int_params = &cdev->int_params; 561 struct msix_entry *tbl; 562 int rc = 0, cnt; 563 564 switch (int_params->in.int_mode) { 565 case QED_INT_MODE_MSIX: 566 /* Allocate MSIX table */ 567 cnt = int_params->in.num_vectors; 568 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); 569 if (!int_params->msix_table) { 570 rc = -ENOMEM; 571 goto out; 572 } 573 574 /* Enable MSIX */ 575 rc = qed_enable_msix(cdev, int_params); 576 if (!rc) 577 goto out; 578 579 DP_NOTICE(cdev, "Failed to enable MSI-X\n"); 580 kfree(int_params->msix_table); 581 if (force_mode) 582 goto out; 583 /* Fallthrough */ 584 585 case QED_INT_MODE_MSI: 586 if (cdev->num_hwfns == 1) { 587 rc = pci_enable_msi(cdev->pdev); 588 if (!rc) { 589 int_params->out.int_mode = QED_INT_MODE_MSI; 590 goto out; 591 } 592 593 DP_NOTICE(cdev, "Failed to enable MSI\n"); 594 if (force_mode) 595 goto out; 596 } 597 /* Fallthrough */ 598 599 case QED_INT_MODE_INTA: 600 int_params->out.int_mode = QED_INT_MODE_INTA; 601 rc = 0; 602 goto out; 603 default: 604 DP_NOTICE(cdev, "Unknown int_mode value %d\n", 605 int_params->in.int_mode); 606 rc = -EINVAL; 607 } 608 609 out: 610 if (!rc) 611 DP_INFO(cdev, "Using %s interrupts\n", 612 int_params->out.int_mode == QED_INT_MODE_INTA ? 613 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 614 "MSI" : "MSIX"); 615 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 616 617 return rc; 618 } 619 620 static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 621 int index, void(*handler)(void *)) 622 { 623 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 624 int relative_idx = index / cdev->num_hwfns; 625 626 hwfn->simd_proto_handler[relative_idx].func = handler; 627 hwfn->simd_proto_handler[relative_idx].token = token; 628 } 629 630 static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 631 { 632 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 633 int relative_idx = index / cdev->num_hwfns; 634 635 memset(&hwfn->simd_proto_handler[relative_idx], 0, 636 sizeof(struct qed_simd_fp_handler)); 637 } 638 639 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 640 { 641 tasklet_schedule((struct tasklet_struct *)tasklet); 642 return IRQ_HANDLED; 643 } 644 645 static irqreturn_t qed_single_int(int irq, void *dev_instance) 646 { 647 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 648 struct qed_hwfn *hwfn; 649 irqreturn_t rc = IRQ_NONE; 650 u64 status; 651 int i, j; 652 653 for (i = 0; i < cdev->num_hwfns; i++) { 654 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 655 656 if (!status) 657 continue; 658 659 hwfn = &cdev->hwfns[i]; 660 661 /* Slowpath interrupt */ 662 if (unlikely(status & 0x1)) { 663 tasklet_schedule(hwfn->sp_dpc); 664 status &= ~0x1; 665 rc = IRQ_HANDLED; 666 } 667 668 /* Fastpath interrupts */ 669 for (j = 0; j < 64; j++) { 670 if ((0x2ULL << j) & status) { 671 struct qed_simd_fp_handler *p_handler = 672 &hwfn->simd_proto_handler[j]; 673 674 if (p_handler->func) 675 p_handler->func(p_handler->token); 676 else 677 DP_NOTICE(hwfn, 678 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", 679 j, status); 680 681 status &= ~(0x2ULL << j); 682 rc = IRQ_HANDLED; 683 } 684 } 685 686 if (unlikely(status)) 687 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 688 "got an unknown interrupt status 0x%llx\n", 689 status); 690 } 691 692 return rc; 693 } 694 695 int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 696 { 697 struct qed_dev *cdev = hwfn->cdev; 698 u32 int_mode; 699 int rc = 0; 700 u8 id; 701 702 int_mode = cdev->int_params.out.int_mode; 703 if (int_mode == QED_INT_MODE_MSIX) { 704 id = hwfn->my_id; 705 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 706 id, cdev->pdev->bus->number, 707 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 708 rc = request_irq(cdev->int_params.msix_table[id].vector, 709 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); 710 } else { 711 unsigned long flags = 0; 712 713 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 714 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 715 PCI_FUNC(cdev->pdev->devfn)); 716 717 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 718 flags |= IRQF_SHARED; 719 720 rc = request_irq(cdev->pdev->irq, qed_single_int, 721 flags, cdev->name, cdev); 722 } 723 724 if (rc) 725 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 726 else 727 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 728 "Requested slowpath %s\n", 729 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); 730 731 return rc; 732 } 733 734 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) 735 { 736 /* Calling the disable function will make sure that any 737 * currently-running function is completed. The following call to the 738 * enable function makes this sequence a flush-like operation. 739 */ 740 if (p_hwfn->b_sp_dpc_enabled) { 741 tasklet_disable(p_hwfn->sp_dpc); 742 tasklet_enable(p_hwfn->sp_dpc); 743 } 744 } 745 746 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 747 { 748 struct qed_dev *cdev = p_hwfn->cdev; 749 u8 id = p_hwfn->my_id; 750 u32 int_mode; 751 752 int_mode = cdev->int_params.out.int_mode; 753 if (int_mode == QED_INT_MODE_MSIX) 754 synchronize_irq(cdev->int_params.msix_table[id].vector); 755 else 756 synchronize_irq(cdev->pdev->irq); 757 758 qed_slowpath_tasklet_flush(p_hwfn); 759 } 760 761 static void qed_slowpath_irq_free(struct qed_dev *cdev) 762 { 763 int i; 764 765 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 766 for_each_hwfn(cdev, i) { 767 if (!cdev->hwfns[i].b_int_requested) 768 break; 769 synchronize_irq(cdev->int_params.msix_table[i].vector); 770 free_irq(cdev->int_params.msix_table[i].vector, 771 cdev->hwfns[i].sp_dpc); 772 } 773 } else { 774 if (QED_LEADING_HWFN(cdev)->b_int_requested) 775 free_irq(cdev->pdev->irq, cdev); 776 } 777 qed_int_disable_post_isr_release(cdev); 778 } 779 780 static int qed_nic_stop(struct qed_dev *cdev) 781 { 782 int i, rc; 783 784 rc = qed_hw_stop(cdev); 785 786 for (i = 0; i < cdev->num_hwfns; i++) { 787 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 788 789 if (p_hwfn->b_sp_dpc_enabled) { 790 tasklet_disable(p_hwfn->sp_dpc); 791 p_hwfn->b_sp_dpc_enabled = false; 792 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 793 "Disabled sp tasklet [hwfn %d] at %p\n", 794 i, p_hwfn->sp_dpc); 795 } 796 } 797 798 qed_dbg_pf_exit(cdev); 799 800 return rc; 801 } 802 803 static int qed_nic_setup(struct qed_dev *cdev) 804 { 805 int rc, i; 806 807 /* Determine if interface is going to require LL2 */ 808 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 809 for (i = 0; i < cdev->num_hwfns; i++) { 810 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 811 812 p_hwfn->using_ll2 = true; 813 } 814 } 815 816 rc = qed_resc_alloc(cdev); 817 if (rc) 818 return rc; 819 820 DP_INFO(cdev, "Allocated qed resources\n"); 821 822 qed_resc_setup(cdev); 823 824 return rc; 825 } 826 827 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 828 { 829 int limit = 0; 830 831 /* Mark the fastpath as free/used */ 832 cdev->int_params.fp_initialized = cnt ? true : false; 833 834 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 835 limit = cdev->num_hwfns * 63; 836 else if (cdev->int_params.fp_msix_cnt) 837 limit = cdev->int_params.fp_msix_cnt; 838 839 if (!limit) 840 return -ENOMEM; 841 842 return min_t(int, cnt, limit); 843 } 844 845 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 846 { 847 memset(info, 0, sizeof(struct qed_int_info)); 848 849 if (!cdev->int_params.fp_initialized) { 850 DP_INFO(cdev, 851 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 852 return -EINVAL; 853 } 854 855 /* Need to expose only MSI-X information; Single IRQ is handled solely 856 * by qed. 857 */ 858 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 859 int msix_base = cdev->int_params.fp_msix_base; 860 861 info->msix_cnt = cdev->int_params.fp_msix_cnt; 862 info->msix = &cdev->int_params.msix_table[msix_base]; 863 } 864 865 return 0; 866 } 867 868 static int qed_slowpath_setup_int(struct qed_dev *cdev, 869 enum qed_int_mode int_mode) 870 { 871 struct qed_sb_cnt_info sb_cnt_info; 872 int num_l2_queues = 0; 873 int rc; 874 int i; 875 876 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 877 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 878 return -EINVAL; 879 } 880 881 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 882 cdev->int_params.in.int_mode = int_mode; 883 for_each_hwfn(cdev, i) { 884 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 885 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 886 cdev->int_params.in.num_vectors += sb_cnt_info.cnt; 887 cdev->int_params.in.num_vectors++; /* slowpath */ 888 } 889 890 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 891 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 892 893 if (is_kdump_kernel()) { 894 DP_INFO(cdev, 895 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 896 cdev->int_params.in.min_msix_cnt); 897 cdev->int_params.in.num_vectors = 898 cdev->int_params.in.min_msix_cnt; 899 } 900 901 rc = qed_set_int_mode(cdev, false); 902 if (rc) { 903 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 904 return rc; 905 } 906 907 cdev->int_params.fp_msix_base = cdev->num_hwfns; 908 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 909 cdev->num_hwfns; 910 911 if (!IS_ENABLED(CONFIG_QED_RDMA) || 912 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 913 return 0; 914 915 for_each_hwfn(cdev, i) 916 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 917 918 DP_VERBOSE(cdev, QED_MSG_RDMA, 919 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 920 cdev->int_params.fp_msix_cnt, num_l2_queues); 921 922 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 923 cdev->int_params.rdma_msix_cnt = 924 (cdev->int_params.fp_msix_cnt - num_l2_queues) 925 / cdev->num_hwfns; 926 cdev->int_params.rdma_msix_base = 927 cdev->int_params.fp_msix_base + num_l2_queues; 928 cdev->int_params.fp_msix_cnt = num_l2_queues; 929 } else { 930 cdev->int_params.rdma_msix_cnt = 0; 931 } 932 933 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 934 cdev->int_params.rdma_msix_cnt, 935 cdev->int_params.rdma_msix_base); 936 937 return 0; 938 } 939 940 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 941 { 942 int rc; 943 944 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 945 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 946 947 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 948 &cdev->int_params.in.num_vectors); 949 if (cdev->num_hwfns > 1) { 950 u8 vectors = 0; 951 952 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 953 cdev->int_params.in.num_vectors += vectors; 954 } 955 956 /* We want a minimum of one fastpath vector per vf hwfn */ 957 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 958 959 rc = qed_set_int_mode(cdev, true); 960 if (rc) 961 return rc; 962 963 cdev->int_params.fp_msix_base = 0; 964 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 965 966 return 0; 967 } 968 969 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 970 u8 *input_buf, u32 max_size, u8 *unzip_buf) 971 { 972 int rc; 973 974 p_hwfn->stream->next_in = input_buf; 975 p_hwfn->stream->avail_in = input_len; 976 p_hwfn->stream->next_out = unzip_buf; 977 p_hwfn->stream->avail_out = max_size; 978 979 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 980 981 if (rc != Z_OK) { 982 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 983 rc); 984 return 0; 985 } 986 987 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 988 zlib_inflateEnd(p_hwfn->stream); 989 990 if (rc != Z_OK && rc != Z_STREAM_END) { 991 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 992 p_hwfn->stream->msg, rc); 993 return 0; 994 } 995 996 return p_hwfn->stream->total_out / 4; 997 } 998 999 static int qed_alloc_stream_mem(struct qed_dev *cdev) 1000 { 1001 int i; 1002 void *workspace; 1003 1004 for_each_hwfn(cdev, i) { 1005 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1006 1007 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 1008 if (!p_hwfn->stream) 1009 return -ENOMEM; 1010 1011 workspace = vzalloc(zlib_inflate_workspacesize()); 1012 if (!workspace) 1013 return -ENOMEM; 1014 p_hwfn->stream->workspace = workspace; 1015 } 1016 1017 return 0; 1018 } 1019 1020 static void qed_free_stream_mem(struct qed_dev *cdev) 1021 { 1022 int i; 1023 1024 for_each_hwfn(cdev, i) { 1025 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1026 1027 if (!p_hwfn->stream) 1028 return; 1029 1030 vfree(p_hwfn->stream->workspace); 1031 kfree(p_hwfn->stream); 1032 } 1033 } 1034 1035 static void qed_update_pf_params(struct qed_dev *cdev, 1036 struct qed_pf_params *params) 1037 { 1038 int i; 1039 1040 if (IS_ENABLED(CONFIG_QED_RDMA)) { 1041 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 1042 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 1043 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 1044 /* divide by 3 the MRs to avoid MF ILT overflow */ 1045 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 1046 } 1047 1048 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 1049 params->eth_pf_params.num_arfs_filters = 0; 1050 1051 /* In case we might support RDMA, don't allow qede to be greedy 1052 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 1053 * per hwfn. 1054 */ 1055 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 1056 u16 *num_cons; 1057 1058 num_cons = ¶ms->eth_pf_params.num_cons; 1059 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 1060 } 1061 1062 for (i = 0; i < cdev->num_hwfns; i++) { 1063 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1064 1065 p_hwfn->pf_params = *params; 1066 } 1067 } 1068 1069 #define QED_PERIODIC_DB_REC_COUNT 10 1070 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 1071 #define QED_PERIODIC_DB_REC_INTERVAL \ 1072 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 1073 1074 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, 1075 enum qed_slowpath_wq_flag wq_flag, 1076 unsigned long delay) 1077 { 1078 if (!hwfn->slowpath_wq_active) 1079 return -EINVAL; 1080 1081 /* Memory barrier for setting atomic bit */ 1082 smp_mb__before_atomic(); 1083 set_bit(wq_flag, &hwfn->slowpath_task_flags); 1084 smp_mb__after_atomic(); 1085 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); 1086 1087 return 0; 1088 } 1089 1090 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) 1091 { 1092 /* Reset periodic Doorbell Recovery counter */ 1093 p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; 1094 1095 /* Don't schedule periodic Doorbell Recovery if already scheduled */ 1096 if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1097 &p_hwfn->slowpath_task_flags)) 1098 return; 1099 1100 qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, 1101 QED_PERIODIC_DB_REC_INTERVAL); 1102 } 1103 1104 static void qed_slowpath_wq_stop(struct qed_dev *cdev) 1105 { 1106 int i; 1107 1108 if (IS_VF(cdev)) 1109 return; 1110 1111 for_each_hwfn(cdev, i) { 1112 if (!cdev->hwfns[i].slowpath_wq) 1113 continue; 1114 1115 /* Stop queuing new delayed works */ 1116 cdev->hwfns[i].slowpath_wq_active = false; 1117 1118 cancel_delayed_work(&cdev->hwfns[i].slowpath_task); 1119 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 1120 } 1121 } 1122 1123 static void qed_slowpath_task(struct work_struct *work) 1124 { 1125 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 1126 slowpath_task.work); 1127 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 1128 1129 if (!ptt) { 1130 if (hwfn->slowpath_wq_active) 1131 queue_delayed_work(hwfn->slowpath_wq, 1132 &hwfn->slowpath_task, 0); 1133 1134 return; 1135 } 1136 1137 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 1138 &hwfn->slowpath_task_flags)) 1139 qed_mfw_process_tlv_req(hwfn, ptt); 1140 1141 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1142 &hwfn->slowpath_task_flags)) { 1143 qed_db_rec_handler(hwfn, ptt); 1144 if (hwfn->periodic_db_rec_count--) 1145 qed_slowpath_delayed_work(hwfn, 1146 QED_SLOWPATH_PERIODIC_DB_REC, 1147 QED_PERIODIC_DB_REC_INTERVAL); 1148 } 1149 1150 qed_ptt_release(hwfn, ptt); 1151 } 1152 1153 static int qed_slowpath_wq_start(struct qed_dev *cdev) 1154 { 1155 struct qed_hwfn *hwfn; 1156 char name[NAME_SIZE]; 1157 int i; 1158 1159 if (IS_VF(cdev)) 1160 return 0; 1161 1162 for_each_hwfn(cdev, i) { 1163 hwfn = &cdev->hwfns[i]; 1164 1165 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", 1166 cdev->pdev->bus->number, 1167 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 1168 1169 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); 1170 if (!hwfn->slowpath_wq) { 1171 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1172 return -ENOMEM; 1173 } 1174 1175 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1176 hwfn->slowpath_wq_active = true; 1177 } 1178 1179 return 0; 1180 } 1181 1182 static int qed_slowpath_start(struct qed_dev *cdev, 1183 struct qed_slowpath_params *params) 1184 { 1185 struct qed_drv_load_params drv_load_params; 1186 struct qed_hw_init_params hw_init_params; 1187 struct qed_mcp_drv_version drv_version; 1188 struct qed_tunnel_info tunn_info; 1189 const u8 *data = NULL; 1190 struct qed_hwfn *hwfn; 1191 struct qed_ptt *p_ptt; 1192 int rc = -EINVAL; 1193 1194 if (qed_iov_wq_start(cdev)) 1195 goto err; 1196 1197 if (qed_slowpath_wq_start(cdev)) 1198 goto err; 1199 1200 if (IS_PF(cdev)) { 1201 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1202 &cdev->pdev->dev); 1203 if (rc) { 1204 DP_NOTICE(cdev, 1205 "Failed to find fw file - /lib/firmware/%s\n", 1206 QED_FW_FILE_NAME); 1207 goto err; 1208 } 1209 1210 if (cdev->num_hwfns == 1) { 1211 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1212 if (p_ptt) { 1213 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1214 } else { 1215 DP_NOTICE(cdev, 1216 "Failed to acquire PTT for aRFS\n"); 1217 goto err; 1218 } 1219 } 1220 } 1221 1222 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1223 rc = qed_nic_setup(cdev); 1224 if (rc) 1225 goto err; 1226 1227 if (IS_PF(cdev)) 1228 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1229 else 1230 rc = qed_slowpath_vf_setup_int(cdev); 1231 if (rc) 1232 goto err1; 1233 1234 if (IS_PF(cdev)) { 1235 /* Allocate stream for unzipping */ 1236 rc = qed_alloc_stream_mem(cdev); 1237 if (rc) 1238 goto err2; 1239 1240 /* First Dword used to differentiate between various sources */ 1241 data = cdev->firmware->data + sizeof(u32); 1242 1243 qed_dbg_pf_init(cdev); 1244 } 1245 1246 /* Start the slowpath */ 1247 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1248 memset(&tunn_info, 0, sizeof(tunn_info)); 1249 tunn_info.vxlan.b_mode_enabled = true; 1250 tunn_info.l2_gre.b_mode_enabled = true; 1251 tunn_info.ip_gre.b_mode_enabled = true; 1252 tunn_info.l2_geneve.b_mode_enabled = true; 1253 tunn_info.ip_geneve.b_mode_enabled = true; 1254 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1255 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1256 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1257 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1258 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1259 hw_init_params.p_tunn = &tunn_info; 1260 hw_init_params.b_hw_start = true; 1261 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1262 hw_init_params.allow_npar_tx_switch = true; 1263 hw_init_params.bin_fw_data = data; 1264 1265 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1266 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1267 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1268 drv_load_params.avoid_eng_reset = false; 1269 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1270 hw_init_params.p_drv_load_params = &drv_load_params; 1271 1272 rc = qed_hw_init(cdev, &hw_init_params); 1273 if (rc) 1274 goto err2; 1275 1276 DP_INFO(cdev, 1277 "HW initialization and function start completed successfully\n"); 1278 1279 if (IS_PF(cdev)) { 1280 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1281 BIT(QED_MODE_L2GENEVE_TUNN) | 1282 BIT(QED_MODE_IPGENEVE_TUNN) | 1283 BIT(QED_MODE_L2GRE_TUNN) | 1284 BIT(QED_MODE_IPGRE_TUNN)); 1285 } 1286 1287 /* Allocate LL2 interface if needed */ 1288 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1289 rc = qed_ll2_alloc_if(cdev); 1290 if (rc) 1291 goto err3; 1292 } 1293 if (IS_PF(cdev)) { 1294 hwfn = QED_LEADING_HWFN(cdev); 1295 drv_version.version = (params->drv_major << 24) | 1296 (params->drv_minor << 16) | 1297 (params->drv_rev << 8) | 1298 (params->drv_eng); 1299 strlcpy(drv_version.name, params->name, 1300 MCP_DRV_VER_STR_SIZE - 4); 1301 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1302 &drv_version); 1303 if (rc) { 1304 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1305 goto err4; 1306 } 1307 } 1308 1309 qed_reset_vport_stats(cdev); 1310 1311 return 0; 1312 1313 err4: 1314 qed_ll2_dealloc_if(cdev); 1315 err3: 1316 qed_hw_stop(cdev); 1317 err2: 1318 qed_hw_timers_stop_all(cdev); 1319 if (IS_PF(cdev)) 1320 qed_slowpath_irq_free(cdev); 1321 qed_free_stream_mem(cdev); 1322 qed_disable_msix(cdev); 1323 err1: 1324 qed_resc_free(cdev); 1325 err: 1326 if (IS_PF(cdev)) 1327 release_firmware(cdev->firmware); 1328 1329 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1330 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1331 qed_ptt_release(QED_LEADING_HWFN(cdev), 1332 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1333 1334 qed_iov_wq_stop(cdev, false); 1335 1336 qed_slowpath_wq_stop(cdev); 1337 1338 return rc; 1339 } 1340 1341 static int qed_slowpath_stop(struct qed_dev *cdev) 1342 { 1343 if (!cdev) 1344 return -ENODEV; 1345 1346 qed_slowpath_wq_stop(cdev); 1347 1348 qed_ll2_dealloc_if(cdev); 1349 1350 if (IS_PF(cdev)) { 1351 if (cdev->num_hwfns == 1) 1352 qed_ptt_release(QED_LEADING_HWFN(cdev), 1353 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1354 qed_free_stream_mem(cdev); 1355 if (IS_QED_ETH_IF(cdev)) 1356 qed_sriov_disable(cdev, true); 1357 } 1358 1359 qed_nic_stop(cdev); 1360 1361 if (IS_PF(cdev)) 1362 qed_slowpath_irq_free(cdev); 1363 1364 qed_disable_msix(cdev); 1365 1366 qed_resc_free(cdev); 1367 1368 qed_iov_wq_stop(cdev, true); 1369 1370 if (IS_PF(cdev)) 1371 release_firmware(cdev->firmware); 1372 1373 return 0; 1374 } 1375 1376 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1377 { 1378 int i; 1379 1380 memcpy(cdev->name, name, NAME_SIZE); 1381 for_each_hwfn(cdev, i) 1382 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1383 } 1384 1385 static u32 qed_sb_init(struct qed_dev *cdev, 1386 struct qed_sb_info *sb_info, 1387 void *sb_virt_addr, 1388 dma_addr_t sb_phy_addr, u16 sb_id, 1389 enum qed_sb_type type) 1390 { 1391 struct qed_hwfn *p_hwfn; 1392 struct qed_ptt *p_ptt; 1393 u16 rel_sb_id; 1394 u32 rc; 1395 1396 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1397 if (type == QED_SB_TYPE_L2_QUEUE) { 1398 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1399 rel_sb_id = sb_id / cdev->num_hwfns; 1400 } else { 1401 p_hwfn = QED_AFFIN_HWFN(cdev); 1402 rel_sb_id = sb_id; 1403 } 1404 1405 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1406 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1407 IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); 1408 1409 if (IS_PF(p_hwfn->cdev)) { 1410 p_ptt = qed_ptt_acquire(p_hwfn); 1411 if (!p_ptt) 1412 return -EBUSY; 1413 1414 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1415 sb_phy_addr, rel_sb_id); 1416 qed_ptt_release(p_hwfn, p_ptt); 1417 } else { 1418 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1419 sb_phy_addr, rel_sb_id); 1420 } 1421 1422 return rc; 1423 } 1424 1425 static u32 qed_sb_release(struct qed_dev *cdev, 1426 struct qed_sb_info *sb_info, 1427 u16 sb_id, 1428 enum qed_sb_type type) 1429 { 1430 struct qed_hwfn *p_hwfn; 1431 u16 rel_sb_id; 1432 u32 rc; 1433 1434 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1435 if (type == QED_SB_TYPE_L2_QUEUE) { 1436 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1437 rel_sb_id = sb_id / cdev->num_hwfns; 1438 } else { 1439 p_hwfn = QED_AFFIN_HWFN(cdev); 1440 rel_sb_id = sb_id; 1441 } 1442 1443 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1444 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1445 IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); 1446 1447 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1448 1449 return rc; 1450 } 1451 1452 static bool qed_can_link_change(struct qed_dev *cdev) 1453 { 1454 return true; 1455 } 1456 1457 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) 1458 { 1459 struct qed_hwfn *hwfn; 1460 struct qed_mcp_link_params *link_params; 1461 struct qed_ptt *ptt; 1462 u32 sup_caps; 1463 int rc; 1464 1465 if (!cdev) 1466 return -ENODEV; 1467 1468 /* The link should be set only once per PF */ 1469 hwfn = &cdev->hwfns[0]; 1470 1471 /* When VF wants to set link, force it to read the bulletin instead. 1472 * This mimics the PF behavior, where a noitification [both immediate 1473 * and possible later] would be generated when changing properties. 1474 */ 1475 if (IS_VF(cdev)) { 1476 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1477 return 0; 1478 } 1479 1480 ptt = qed_ptt_acquire(hwfn); 1481 if (!ptt) 1482 return -EBUSY; 1483 1484 link_params = qed_mcp_get_link_params(hwfn); 1485 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1486 link_params->speed.autoneg = params->autoneg; 1487 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1488 link_params->speed.advertised_speeds = 0; 1489 sup_caps = QED_LM_1000baseT_Full_BIT | 1490 QED_LM_1000baseKX_Full_BIT | 1491 QED_LM_1000baseX_Full_BIT; 1492 if (params->adv_speeds & sup_caps) 1493 link_params->speed.advertised_speeds |= 1494 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 1495 sup_caps = QED_LM_10000baseT_Full_BIT | 1496 QED_LM_10000baseKR_Full_BIT | 1497 QED_LM_10000baseKX4_Full_BIT | 1498 QED_LM_10000baseR_FEC_BIT | 1499 QED_LM_10000baseCR_Full_BIT | 1500 QED_LM_10000baseSR_Full_BIT | 1501 QED_LM_10000baseLR_Full_BIT | 1502 QED_LM_10000baseLRM_Full_BIT; 1503 if (params->adv_speeds & sup_caps) 1504 link_params->speed.advertised_speeds |= 1505 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 1506 if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) 1507 link_params->speed.advertised_speeds |= 1508 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 1509 sup_caps = QED_LM_25000baseKR_Full_BIT | 1510 QED_LM_25000baseCR_Full_BIT | 1511 QED_LM_25000baseSR_Full_BIT; 1512 if (params->adv_speeds & sup_caps) 1513 link_params->speed.advertised_speeds |= 1514 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 1515 sup_caps = QED_LM_40000baseLR4_Full_BIT | 1516 QED_LM_40000baseKR4_Full_BIT | 1517 QED_LM_40000baseCR4_Full_BIT | 1518 QED_LM_40000baseSR4_Full_BIT; 1519 if (params->adv_speeds & sup_caps) 1520 link_params->speed.advertised_speeds |= 1521 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 1522 sup_caps = QED_LM_50000baseKR2_Full_BIT | 1523 QED_LM_50000baseCR2_Full_BIT | 1524 QED_LM_50000baseSR2_Full_BIT; 1525 if (params->adv_speeds & sup_caps) 1526 link_params->speed.advertised_speeds |= 1527 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 1528 sup_caps = QED_LM_100000baseKR4_Full_BIT | 1529 QED_LM_100000baseSR4_Full_BIT | 1530 QED_LM_100000baseCR4_Full_BIT | 1531 QED_LM_100000baseLR4_ER4_Full_BIT; 1532 if (params->adv_speeds & sup_caps) 1533 link_params->speed.advertised_speeds |= 1534 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 1535 } 1536 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1537 link_params->speed.forced_speed = params->forced_speed; 1538 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1539 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1540 link_params->pause.autoneg = true; 1541 else 1542 link_params->pause.autoneg = false; 1543 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1544 link_params->pause.forced_rx = true; 1545 else 1546 link_params->pause.forced_rx = false; 1547 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1548 link_params->pause.forced_tx = true; 1549 else 1550 link_params->pause.forced_tx = false; 1551 } 1552 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1553 switch (params->loopback_mode) { 1554 case QED_LINK_LOOPBACK_INT_PHY: 1555 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1556 break; 1557 case QED_LINK_LOOPBACK_EXT_PHY: 1558 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1559 break; 1560 case QED_LINK_LOOPBACK_EXT: 1561 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1562 break; 1563 case QED_LINK_LOOPBACK_MAC: 1564 link_params->loopback_mode = ETH_LOOPBACK_MAC; 1565 break; 1566 default: 1567 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1568 break; 1569 } 1570 } 1571 1572 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1573 memcpy(&link_params->eee, ¶ms->eee, 1574 sizeof(link_params->eee)); 1575 1576 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1577 1578 qed_ptt_release(hwfn, ptt); 1579 1580 return rc; 1581 } 1582 1583 static int qed_get_port_type(u32 media_type) 1584 { 1585 int port_type; 1586 1587 switch (media_type) { 1588 case MEDIA_SFPP_10G_FIBER: 1589 case MEDIA_SFP_1G_FIBER: 1590 case MEDIA_XFP_FIBER: 1591 case MEDIA_MODULE_FIBER: 1592 case MEDIA_KR: 1593 port_type = PORT_FIBRE; 1594 break; 1595 case MEDIA_DA_TWINAX: 1596 port_type = PORT_DA; 1597 break; 1598 case MEDIA_BASE_T: 1599 port_type = PORT_TP; 1600 break; 1601 case MEDIA_NOT_PRESENT: 1602 port_type = PORT_NONE; 1603 break; 1604 case MEDIA_UNSPECIFIED: 1605 default: 1606 port_type = PORT_OTHER; 1607 break; 1608 } 1609 return port_type; 1610 } 1611 1612 static int qed_get_link_data(struct qed_hwfn *hwfn, 1613 struct qed_mcp_link_params *params, 1614 struct qed_mcp_link_state *link, 1615 struct qed_mcp_link_capabilities *link_caps) 1616 { 1617 void *p; 1618 1619 if (!IS_PF(hwfn->cdev)) { 1620 qed_vf_get_link_params(hwfn, params); 1621 qed_vf_get_link_state(hwfn, link); 1622 qed_vf_get_link_caps(hwfn, link_caps); 1623 1624 return 0; 1625 } 1626 1627 p = qed_mcp_get_link_params(hwfn); 1628 if (!p) 1629 return -ENXIO; 1630 memcpy(params, p, sizeof(*params)); 1631 1632 p = qed_mcp_get_link_state(hwfn); 1633 if (!p) 1634 return -ENXIO; 1635 memcpy(link, p, sizeof(*link)); 1636 1637 p = qed_mcp_get_link_capabilities(hwfn); 1638 if (!p) 1639 return -ENXIO; 1640 memcpy(link_caps, p, sizeof(*link_caps)); 1641 1642 return 0; 1643 } 1644 1645 static void qed_fill_link_capability(struct qed_hwfn *hwfn, 1646 struct qed_ptt *ptt, u32 capability, 1647 u32 *if_capability) 1648 { 1649 u32 media_type, tcvr_state, tcvr_type; 1650 u32 speed_mask, board_cfg; 1651 1652 if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) 1653 media_type = MEDIA_UNSPECIFIED; 1654 1655 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) 1656 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; 1657 1658 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) 1659 speed_mask = 0xFFFFFFFF; 1660 1661 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) 1662 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 1663 1664 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 1665 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", 1666 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); 1667 1668 switch (media_type) { 1669 case MEDIA_DA_TWINAX: 1670 *if_capability |= QED_LM_FIBRE_BIT; 1671 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1672 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1673 /* For DAC media multiple speed capabilities are supported*/ 1674 capability = capability & speed_mask; 1675 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1676 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1677 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1678 *if_capability |= QED_LM_10000baseCR_Full_BIT; 1679 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1680 *if_capability |= QED_LM_40000baseCR4_Full_BIT; 1681 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1682 *if_capability |= QED_LM_25000baseCR_Full_BIT; 1683 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1684 *if_capability |= QED_LM_50000baseCR2_Full_BIT; 1685 if (capability & 1686 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1687 *if_capability |= QED_LM_100000baseCR4_Full_BIT; 1688 break; 1689 case MEDIA_BASE_T: 1690 *if_capability |= QED_LM_TP_BIT; 1691 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { 1692 if (capability & 1693 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { 1694 *if_capability |= QED_LM_1000baseT_Full_BIT; 1695 } 1696 if (capability & 1697 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { 1698 *if_capability |= QED_LM_10000baseT_Full_BIT; 1699 } 1700 } 1701 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { 1702 *if_capability |= QED_LM_FIBRE_BIT; 1703 if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) 1704 *if_capability |= QED_LM_1000baseT_Full_BIT; 1705 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) 1706 *if_capability |= QED_LM_10000baseT_Full_BIT; 1707 } 1708 break; 1709 case MEDIA_SFP_1G_FIBER: 1710 case MEDIA_SFPP_10G_FIBER: 1711 case MEDIA_XFP_FIBER: 1712 case MEDIA_MODULE_FIBER: 1713 *if_capability |= QED_LM_FIBRE_BIT; 1714 if (capability & 1715 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { 1716 if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || 1717 (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX)) 1718 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1719 } 1720 if (capability & 1721 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { 1722 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR) 1723 *if_capability |= QED_LM_10000baseSR_Full_BIT; 1724 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR) 1725 *if_capability |= QED_LM_10000baseLR_Full_BIT; 1726 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM) 1727 *if_capability |= QED_LM_10000baseLRM_Full_BIT; 1728 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER) 1729 *if_capability |= QED_LM_10000baseR_FEC_BIT; 1730 } 1731 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1732 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1733 if (capability & 1734 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { 1735 if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR) 1736 *if_capability |= QED_LM_25000baseSR_Full_BIT; 1737 } 1738 if (capability & 1739 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { 1740 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4) 1741 *if_capability |= QED_LM_40000baseLR4_Full_BIT; 1742 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4) 1743 *if_capability |= QED_LM_40000baseSR4_Full_BIT; 1744 } 1745 if (capability & 1746 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1747 *if_capability |= QED_LM_50000baseKR2_Full_BIT; 1748 if (capability & 1749 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) { 1750 if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4) 1751 *if_capability |= QED_LM_100000baseSR4_Full_BIT; 1752 } 1753 1754 break; 1755 case MEDIA_KR: 1756 *if_capability |= QED_LM_Backplane_BIT; 1757 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1758 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1759 if (capability & 1760 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1761 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1762 if (capability & 1763 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1764 *if_capability |= QED_LM_10000baseKR_Full_BIT; 1765 if (capability & 1766 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1767 *if_capability |= QED_LM_25000baseKR_Full_BIT; 1768 if (capability & 1769 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1770 *if_capability |= QED_LM_40000baseKR4_Full_BIT; 1771 if (capability & 1772 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1773 *if_capability |= QED_LM_50000baseKR2_Full_BIT; 1774 if (capability & 1775 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1776 *if_capability |= QED_LM_100000baseKR4_Full_BIT; 1777 break; 1778 case MEDIA_UNSPECIFIED: 1779 case MEDIA_NOT_PRESENT: 1780 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, 1781 "Unknown media and transceiver type;\n"); 1782 break; 1783 } 1784 } 1785 1786 static void qed_fill_link(struct qed_hwfn *hwfn, 1787 struct qed_ptt *ptt, 1788 struct qed_link_output *if_link) 1789 { 1790 struct qed_mcp_link_capabilities link_caps; 1791 struct qed_mcp_link_params params; 1792 struct qed_mcp_link_state link; 1793 u32 media_type; 1794 1795 memset(if_link, 0, sizeof(*if_link)); 1796 1797 /* Prepare source inputs */ 1798 if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { 1799 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 1800 return; 1801 } 1802 1803 /* Set the link parameters to pass to protocol driver */ 1804 if (link.link_up) 1805 if_link->link_up = true; 1806 1807 /* TODO - at the moment assume supported and advertised speed equal */ 1808 if (link_caps.default_speed_autoneg) 1809 if_link->supported_caps |= QED_LM_Autoneg_BIT; 1810 if (params.pause.autoneg || 1811 (params.pause.forced_rx && params.pause.forced_tx)) 1812 if_link->supported_caps |= QED_LM_Asym_Pause_BIT; 1813 if (params.pause.autoneg || params.pause.forced_rx || 1814 params.pause.forced_tx) 1815 if_link->supported_caps |= QED_LM_Pause_BIT; 1816 1817 if_link->advertised_caps = if_link->supported_caps; 1818 if (params.speed.autoneg) 1819 if_link->advertised_caps |= QED_LM_Autoneg_BIT; 1820 else 1821 if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; 1822 1823 /* Fill link advertised capability*/ 1824 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 1825 &if_link->advertised_caps); 1826 /* Fill link supported capability*/ 1827 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 1828 &if_link->supported_caps); 1829 1830 if (link.link_up) 1831 if_link->speed = link.speed; 1832 1833 /* TODO - fill duplex properly */ 1834 if_link->duplex = DUPLEX_FULL; 1835 qed_mcp_get_media_type(hwfn, ptt, &media_type); 1836 if_link->port = qed_get_port_type(media_type); 1837 1838 if_link->autoneg = params.speed.autoneg; 1839 1840 if (params.pause.autoneg) 1841 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1842 if (params.pause.forced_rx) 1843 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1844 if (params.pause.forced_tx) 1845 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1846 1847 /* Link partner capabilities */ 1848 if (link.partner_adv_speed & 1849 QED_LINK_PARTNER_SPEED_1G_FD) 1850 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; 1851 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) 1852 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; 1853 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) 1854 if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; 1855 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) 1856 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; 1857 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) 1858 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; 1859 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) 1860 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; 1861 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) 1862 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; 1863 1864 if (link.an_complete) 1865 if_link->lp_caps |= QED_LM_Autoneg_BIT; 1866 1867 if (link.partner_adv_pause) 1868 if_link->lp_caps |= QED_LM_Pause_BIT; 1869 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 1870 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 1871 if_link->lp_caps |= QED_LM_Asym_Pause_BIT; 1872 1873 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 1874 if_link->eee_supported = false; 1875 } else { 1876 if_link->eee_supported = true; 1877 if_link->eee_active = link.eee_active; 1878 if_link->sup_caps = link_caps.eee_speed_caps; 1879 /* MFW clears adv_caps on eee disable; use configured value */ 1880 if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : 1881 params.eee.adv_caps; 1882 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 1883 if_link->eee.enable = params.eee.enable; 1884 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 1885 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 1886 } 1887 } 1888 1889 static void qed_get_current_link(struct qed_dev *cdev, 1890 struct qed_link_output *if_link) 1891 { 1892 struct qed_hwfn *hwfn; 1893 struct qed_ptt *ptt; 1894 int i; 1895 1896 hwfn = &cdev->hwfns[0]; 1897 if (IS_PF(cdev)) { 1898 ptt = qed_ptt_acquire(hwfn); 1899 if (ptt) { 1900 qed_fill_link(hwfn, ptt, if_link); 1901 qed_ptt_release(hwfn, ptt); 1902 } else { 1903 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 1904 } 1905 } else { 1906 qed_fill_link(hwfn, NULL, if_link); 1907 } 1908 1909 for_each_hwfn(cdev, i) 1910 qed_inform_vf_link_state(&cdev->hwfns[i]); 1911 } 1912 1913 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 1914 { 1915 void *cookie = hwfn->cdev->ops_cookie; 1916 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 1917 struct qed_link_output if_link; 1918 1919 qed_fill_link(hwfn, ptt, &if_link); 1920 qed_inform_vf_link_state(hwfn); 1921 1922 if (IS_LEAD_HWFN(hwfn) && cookie) 1923 op->link_update(cookie, &if_link); 1924 } 1925 1926 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 1927 { 1928 void *cookie = hwfn->cdev->ops_cookie; 1929 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 1930 1931 if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) 1932 op->bw_update(cookie); 1933 } 1934 1935 static int qed_drain(struct qed_dev *cdev) 1936 { 1937 struct qed_hwfn *hwfn; 1938 struct qed_ptt *ptt; 1939 int i, rc; 1940 1941 if (IS_VF(cdev)) 1942 return 0; 1943 1944 for_each_hwfn(cdev, i) { 1945 hwfn = &cdev->hwfns[i]; 1946 ptt = qed_ptt_acquire(hwfn); 1947 if (!ptt) { 1948 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 1949 return -EBUSY; 1950 } 1951 rc = qed_mcp_drain(hwfn, ptt); 1952 qed_ptt_release(hwfn, ptt); 1953 if (rc) 1954 return rc; 1955 } 1956 1957 return 0; 1958 } 1959 1960 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 1961 struct qed_nvm_image_att *nvm_image, 1962 u32 *crc) 1963 { 1964 u8 *buf = NULL; 1965 int rc; 1966 1967 /* Allocate a buffer for holding the nvram image */ 1968 buf = kzalloc(nvm_image->length, GFP_KERNEL); 1969 if (!buf) 1970 return -ENOMEM; 1971 1972 /* Read image into buffer */ 1973 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 1974 buf, nvm_image->length); 1975 if (rc) { 1976 DP_ERR(cdev, "Failed reading image from nvm\n"); 1977 goto out; 1978 } 1979 1980 /* Convert the buffer into big-endian format (excluding the 1981 * closing 4 bytes of CRC). 1982 */ 1983 cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, 1984 DIV_ROUND_UP(nvm_image->length - 4, 4)); 1985 1986 /* Calc CRC for the "actual" image buffer, i.e. not including 1987 * the last 4 CRC bytes. 1988 */ 1989 *crc = ~crc32(~0U, buf, nvm_image->length - 4); 1990 *crc = (__force u32)cpu_to_be32p(crc); 1991 1992 out: 1993 kfree(buf); 1994 1995 return rc; 1996 } 1997 1998 /* Binary file format - 1999 * /----------------------------------------------------------------------\ 2000 * 0B | 0x4 [command index] | 2001 * 4B | image_type | Options | Number of register settings | 2002 * 8B | Value | 2003 * 12B | Mask | 2004 * 16B | Offset | 2005 * \----------------------------------------------------------------------/ 2006 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 2007 * Options - 0'b - Calculate & Update CRC for image 2008 */ 2009 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 2010 bool *check_resp) 2011 { 2012 struct qed_nvm_image_att nvm_image; 2013 struct qed_hwfn *p_hwfn; 2014 bool is_crc = false; 2015 u32 image_type; 2016 int rc = 0, i; 2017 u16 len; 2018 2019 *data += 4; 2020 image_type = **data; 2021 p_hwfn = QED_LEADING_HWFN(cdev); 2022 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2023 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 2024 break; 2025 if (i == p_hwfn->nvm_info.num_images) { 2026 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 2027 image_type); 2028 return -ENOENT; 2029 } 2030 2031 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 2032 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 2033 2034 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2035 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 2036 **data, image_type, nvm_image.start_addr, 2037 nvm_image.start_addr + nvm_image.length - 1); 2038 (*data)++; 2039 is_crc = !!(**data & BIT(0)); 2040 (*data)++; 2041 len = *((u16 *)*data); 2042 *data += 2; 2043 if (is_crc) { 2044 u32 crc = 0; 2045 2046 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 2047 if (rc) { 2048 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 2049 goto exit; 2050 } 2051 2052 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2053 (nvm_image.start_addr + 2054 nvm_image.length - 4), (u8 *)&crc, 4); 2055 if (rc) 2056 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 2057 nvm_image.start_addr + nvm_image.length - 4, rc); 2058 goto exit; 2059 } 2060 2061 /* Iterate over the values for setting */ 2062 while (len) { 2063 u32 offset, mask, value, cur_value; 2064 u8 buf[4]; 2065 2066 value = *((u32 *)*data); 2067 *data += 4; 2068 mask = *((u32 *)*data); 2069 *data += 4; 2070 offset = *((u32 *)*data); 2071 *data += 4; 2072 2073 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 2074 4); 2075 if (rc) { 2076 DP_ERR(cdev, "Failed reading from %08x\n", 2077 nvm_image.start_addr + offset); 2078 goto exit; 2079 } 2080 2081 cur_value = le32_to_cpu(*((__le32 *)buf)); 2082 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2083 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 2084 nvm_image.start_addr + offset, cur_value, 2085 (cur_value & ~mask) | (value & mask), value, mask); 2086 value = (value & mask) | (cur_value & ~mask); 2087 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2088 nvm_image.start_addr + offset, 2089 (u8 *)&value, 4); 2090 if (rc) { 2091 DP_ERR(cdev, "Failed writing to %08x\n", 2092 nvm_image.start_addr + offset); 2093 goto exit; 2094 } 2095 2096 len--; 2097 } 2098 exit: 2099 return rc; 2100 } 2101 2102 /* Binary file format - 2103 * /----------------------------------------------------------------------\ 2104 * 0B | 0x3 [command index] | 2105 * 4B | b'0: check_response? | b'1-31 reserved | 2106 * 8B | File-type | reserved | 2107 * 12B | Image length in bytes | 2108 * \----------------------------------------------------------------------/ 2109 * Start a new file of the provided type 2110 */ 2111 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 2112 const u8 **data, bool *check_resp) 2113 { 2114 u32 file_type, file_size = 0; 2115 int rc; 2116 2117 *data += 4; 2118 *check_resp = !!(**data & BIT(0)); 2119 *data += 4; 2120 file_type = **data; 2121 2122 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2123 "About to start a new file of type %02x\n", file_type); 2124 if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { 2125 *data += 4; 2126 file_size = *((u32 *)(*data)); 2127 } 2128 2129 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, 2130 (u8 *)(&file_size), 4); 2131 *data += 4; 2132 2133 return rc; 2134 } 2135 2136 /* Binary file format - 2137 * /----------------------------------------------------------------------\ 2138 * 0B | 0x2 [command index] | 2139 * 4B | Length in bytes | 2140 * 8B | b'0: check_response? | b'1-31 reserved | 2141 * 12B | Offset in bytes | 2142 * 16B | Data ... | 2143 * \----------------------------------------------------------------------/ 2144 * Write data as part of a file that was previously started. Data should be 2145 * of length equal to that provided in the message 2146 */ 2147 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 2148 const u8 **data, bool *check_resp) 2149 { 2150 u32 offset, len; 2151 int rc; 2152 2153 *data += 4; 2154 len = *((u32 *)(*data)); 2155 *data += 4; 2156 *check_resp = !!(**data & BIT(0)); 2157 *data += 4; 2158 offset = *((u32 *)(*data)); 2159 *data += 4; 2160 2161 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2162 "About to write File-data: %08x bytes to offset %08x\n", 2163 len, offset); 2164 2165 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 2166 (char *)(*data), len); 2167 *data += len; 2168 2169 return rc; 2170 } 2171 2172 /* Binary file format [General header] - 2173 * /----------------------------------------------------------------------\ 2174 * 0B | QED_NVM_SIGNATURE | 2175 * 4B | Length in bytes | 2176 * 8B | Highest command in this batchfile | Reserved | 2177 * \----------------------------------------------------------------------/ 2178 */ 2179 static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 2180 const struct firmware *image, 2181 const u8 **data) 2182 { 2183 u32 signature, len; 2184 2185 /* Check minimum size */ 2186 if (image->size < 12) { 2187 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 2188 return -EINVAL; 2189 } 2190 2191 /* Check signature */ 2192 signature = *((u32 *)(*data)); 2193 if (signature != QED_NVM_SIGNATURE) { 2194 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 2195 return -EINVAL; 2196 } 2197 2198 *data += 4; 2199 /* Validate internal size equals the image-size */ 2200 len = *((u32 *)(*data)); 2201 if (len != image->size) { 2202 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 2203 len, (u32)image->size); 2204 return -EINVAL; 2205 } 2206 2207 *data += 4; 2208 /* Make sure driver familiar with all commands necessary for this */ 2209 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 2210 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", 2211 *((u16 *)(*data))); 2212 return -EINVAL; 2213 } 2214 2215 *data += 4; 2216 2217 return 0; 2218 } 2219 2220 /* Binary file format - 2221 * /----------------------------------------------------------------------\ 2222 * 0B | 0x5 [command index] | 2223 * 4B | Number of config attributes | Reserved | 2224 * 4B | Config ID | Entity ID | Length | 2225 * 4B | Value | 2226 * | | 2227 * \----------------------------------------------------------------------/ 2228 * There can be several cfg_id-entity_id-Length-Value sets as specified by 2229 * 'Number of config attributes'. 2230 * 2231 * The API parses config attributes from the user provided buffer and flashes 2232 * them to the respective NVM path using Management FW inerface. 2233 */ 2234 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) 2235 { 2236 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2237 u8 entity_id, len, buf[32]; 2238 bool need_nvm_init = true; 2239 struct qed_ptt *ptt; 2240 u16 cfg_id, count; 2241 int rc = 0, i; 2242 u32 flags; 2243 2244 ptt = qed_ptt_acquire(hwfn); 2245 if (!ptt) 2246 return -EAGAIN; 2247 2248 /* NVM CFG ID attribute header */ 2249 *data += 4; 2250 count = *((u16 *)*data); 2251 *data += 4; 2252 2253 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2254 "Read config ids: num_attrs = %0d\n", count); 2255 /* NVM CFG ID attributes. Start loop index from 1 to avoid additional 2256 * arithmetic operations in the implementation. 2257 */ 2258 for (i = 1; i <= count; i++) { 2259 cfg_id = *((u16 *)*data); 2260 *data += 2; 2261 entity_id = **data; 2262 (*data)++; 2263 len = **data; 2264 (*data)++; 2265 memcpy(buf, *data, len); 2266 *data += len; 2267 2268 flags = 0; 2269 if (need_nvm_init) { 2270 flags |= QED_NVM_CFG_OPTION_INIT; 2271 need_nvm_init = false; 2272 } 2273 2274 /* Commit to flash and free the resources */ 2275 if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { 2276 flags |= QED_NVM_CFG_OPTION_COMMIT | 2277 QED_NVM_CFG_OPTION_FREE; 2278 need_nvm_init = true; 2279 } 2280 2281 if (entity_id) 2282 flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; 2283 2284 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2285 "cfg_id = %d entity = %d len = %d\n", cfg_id, 2286 entity_id, len); 2287 rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, 2288 buf, len); 2289 if (rc) { 2290 DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); 2291 break; 2292 } 2293 } 2294 2295 qed_ptt_release(hwfn, ptt); 2296 2297 return rc; 2298 } 2299 2300 #define QED_MAX_NVM_BUF_LEN 32 2301 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) 2302 { 2303 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2304 u8 buf[QED_MAX_NVM_BUF_LEN]; 2305 struct qed_ptt *ptt; 2306 u32 len; 2307 int rc; 2308 2309 ptt = qed_ptt_acquire(hwfn); 2310 if (!ptt) 2311 return QED_MAX_NVM_BUF_LEN; 2312 2313 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, 2314 &len); 2315 if (rc || !len) { 2316 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2317 len = QED_MAX_NVM_BUF_LEN; 2318 } 2319 2320 qed_ptt_release(hwfn, ptt); 2321 2322 return len; 2323 } 2324 2325 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, 2326 u32 cmd, u32 entity_id) 2327 { 2328 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2329 struct qed_ptt *ptt; 2330 u32 flags, len; 2331 int rc = 0; 2332 2333 ptt = qed_ptt_acquire(hwfn); 2334 if (!ptt) 2335 return -EAGAIN; 2336 2337 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2338 "Read config cmd = %d entity id %d\n", cmd, entity_id); 2339 flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; 2340 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); 2341 if (rc) 2342 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2343 2344 qed_ptt_release(hwfn, ptt); 2345 2346 return rc; 2347 } 2348 2349 static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 2350 { 2351 const struct firmware *image; 2352 const u8 *data, *data_end; 2353 u32 cmd_type; 2354 int rc; 2355 2356 rc = request_firmware(&image, name, &cdev->pdev->dev); 2357 if (rc) { 2358 DP_ERR(cdev, "Failed to find '%s'\n", name); 2359 return rc; 2360 } 2361 2362 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2363 "Flashing '%s' - firmware's data at %p, size is %08x\n", 2364 name, image->data, (u32)image->size); 2365 data = image->data; 2366 data_end = data + image->size; 2367 2368 rc = qed_nvm_flash_image_validate(cdev, image, &data); 2369 if (rc) 2370 goto exit; 2371 2372 while (data < data_end) { 2373 bool check_resp = false; 2374 2375 /* Parse the actual command */ 2376 cmd_type = *((u32 *)data); 2377 switch (cmd_type) { 2378 case QED_NVM_FLASH_CMD_FILE_DATA: 2379 rc = qed_nvm_flash_image_file_data(cdev, &data, 2380 &check_resp); 2381 break; 2382 case QED_NVM_FLASH_CMD_FILE_START: 2383 rc = qed_nvm_flash_image_file_start(cdev, &data, 2384 &check_resp); 2385 break; 2386 case QED_NVM_FLASH_CMD_NVM_CHANGE: 2387 rc = qed_nvm_flash_image_access(cdev, &data, 2388 &check_resp); 2389 break; 2390 case QED_NVM_FLASH_CMD_NVM_CFG_ID: 2391 rc = qed_nvm_flash_cfg_write(cdev, &data); 2392 break; 2393 default: 2394 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 2395 rc = -EINVAL; 2396 goto exit; 2397 } 2398 2399 if (rc) { 2400 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 2401 goto exit; 2402 } 2403 2404 /* Check response if needed */ 2405 if (check_resp) { 2406 u32 mcp_response = 0; 2407 2408 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 2409 DP_ERR(cdev, "Failed getting MCP response\n"); 2410 rc = -EINVAL; 2411 goto exit; 2412 } 2413 2414 switch (mcp_response & FW_MSG_CODE_MASK) { 2415 case FW_MSG_CODE_OK: 2416 case FW_MSG_CODE_NVM_OK: 2417 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 2418 case FW_MSG_CODE_PHY_OK: 2419 break; 2420 default: 2421 DP_ERR(cdev, "MFW returns error: %08x\n", 2422 mcp_response); 2423 rc = -EINVAL; 2424 goto exit; 2425 } 2426 } 2427 } 2428 2429 exit: 2430 release_firmware(image); 2431 2432 return rc; 2433 } 2434 2435 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 2436 u8 *buf, u16 len) 2437 { 2438 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2439 2440 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 2441 } 2442 2443 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) 2444 { 2445 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2446 void *cookie = p_hwfn->cdev->ops_cookie; 2447 2448 if (ops && ops->schedule_recovery_handler) 2449 ops->schedule_recovery_handler(cookie); 2450 } 2451 2452 static const char * const qed_hw_err_type_descr[] = { 2453 [QED_HW_ERR_FAN_FAIL] = "Fan Failure", 2454 [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", 2455 [QED_HW_ERR_HW_ATTN] = "HW Attention", 2456 [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", 2457 [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", 2458 [QED_HW_ERR_FW_ASSERT] = "FW Assertion", 2459 [QED_HW_ERR_LAST] = "Unknown", 2460 }; 2461 2462 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, 2463 enum qed_hw_err_type err_type) 2464 { 2465 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2466 void *cookie = p_hwfn->cdev->ops_cookie; 2467 const char *err_str; 2468 2469 if (err_type > QED_HW_ERR_LAST) 2470 err_type = QED_HW_ERR_LAST; 2471 err_str = qed_hw_err_type_descr[err_type]; 2472 2473 DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); 2474 2475 /* Call the HW error handler of the protocol driver. 2476 * If it is not available - perform a minimal handling of preventing 2477 * HW attentions from being reasserted. 2478 */ 2479 if (ops && ops->schedule_hw_err_handler) 2480 ops->schedule_hw_err_handler(cookie, err_type); 2481 else 2482 qed_int_attn_clr_enable(p_hwfn->cdev, true); 2483 } 2484 2485 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2486 void *handle) 2487 { 2488 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2489 } 2490 2491 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2492 { 2493 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2494 struct qed_ptt *ptt; 2495 int status = 0; 2496 2497 ptt = qed_ptt_acquire(hwfn); 2498 if (!ptt) 2499 return -EAGAIN; 2500 2501 status = qed_mcp_set_led(hwfn, ptt, mode); 2502 2503 qed_ptt_release(hwfn, ptt); 2504 2505 return status; 2506 } 2507 2508 static int qed_recovery_process(struct qed_dev *cdev) 2509 { 2510 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2511 struct qed_ptt *p_ptt; 2512 int rc = 0; 2513 2514 p_ptt = qed_ptt_acquire(p_hwfn); 2515 if (!p_ptt) 2516 return -EAGAIN; 2517 2518 rc = qed_start_recovery_process(p_hwfn, p_ptt); 2519 2520 qed_ptt_release(p_hwfn, p_ptt); 2521 2522 return rc; 2523 } 2524 2525 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2526 { 2527 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2528 struct qed_ptt *ptt; 2529 int rc = 0; 2530 2531 if (IS_VF(cdev)) 2532 return 0; 2533 2534 ptt = qed_ptt_acquire(hwfn); 2535 if (!ptt) 2536 return -EAGAIN; 2537 2538 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 2539 : QED_OV_WOL_DISABLED); 2540 if (rc) 2541 goto out; 2542 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2543 2544 out: 2545 qed_ptt_release(hwfn, ptt); 2546 return rc; 2547 } 2548 2549 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2550 { 2551 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2552 struct qed_ptt *ptt; 2553 int status = 0; 2554 2555 if (IS_VF(cdev)) 2556 return 0; 2557 2558 ptt = qed_ptt_acquire(hwfn); 2559 if (!ptt) 2560 return -EAGAIN; 2561 2562 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 2563 QED_OV_DRIVER_STATE_ACTIVE : 2564 QED_OV_DRIVER_STATE_DISABLED); 2565 2566 qed_ptt_release(hwfn, ptt); 2567 2568 return status; 2569 } 2570 2571 static int qed_update_mac(struct qed_dev *cdev, u8 *mac) 2572 { 2573 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2574 struct qed_ptt *ptt; 2575 int status = 0; 2576 2577 if (IS_VF(cdev)) 2578 return 0; 2579 2580 ptt = qed_ptt_acquire(hwfn); 2581 if (!ptt) 2582 return -EAGAIN; 2583 2584 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 2585 if (status) 2586 goto out; 2587 2588 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2589 2590 out: 2591 qed_ptt_release(hwfn, ptt); 2592 return status; 2593 } 2594 2595 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 2596 { 2597 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2598 struct qed_ptt *ptt; 2599 int status = 0; 2600 2601 if (IS_VF(cdev)) 2602 return 0; 2603 2604 ptt = qed_ptt_acquire(hwfn); 2605 if (!ptt) 2606 return -EAGAIN; 2607 2608 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 2609 if (status) 2610 goto out; 2611 2612 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2613 2614 out: 2615 qed_ptt_release(hwfn, ptt); 2616 return status; 2617 } 2618 2619 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, 2620 u8 dev_addr, u32 offset, u32 len) 2621 { 2622 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2623 struct qed_ptt *ptt; 2624 int rc = 0; 2625 2626 if (IS_VF(cdev)) 2627 return 0; 2628 2629 ptt = qed_ptt_acquire(hwfn); 2630 if (!ptt) 2631 return -EAGAIN; 2632 2633 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, 2634 offset, len, buf); 2635 2636 qed_ptt_release(hwfn, ptt); 2637 2638 return rc; 2639 } 2640 2641 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) 2642 { 2643 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2644 struct qed_ptt *ptt; 2645 int rc = 0; 2646 2647 if (IS_VF(cdev)) 2648 return 0; 2649 2650 ptt = qed_ptt_acquire(hwfn); 2651 if (!ptt) 2652 return -EAGAIN; 2653 2654 rc = qed_dbg_grc_config(hwfn, cfg_id, val); 2655 2656 qed_ptt_release(hwfn, ptt); 2657 2658 return rc; 2659 } 2660 2661 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) 2662 { 2663 return QED_AFFIN_HWFN_IDX(cdev); 2664 } 2665 2666 static struct qed_selftest_ops qed_selftest_ops_pass = { 2667 .selftest_memory = &qed_selftest_memory, 2668 .selftest_interrupt = &qed_selftest_interrupt, 2669 .selftest_register = &qed_selftest_register, 2670 .selftest_clock = &qed_selftest_clock, 2671 .selftest_nvram = &qed_selftest_nvram, 2672 }; 2673 2674 const struct qed_common_ops qed_common_ops_pass = { 2675 .selftest = &qed_selftest_ops_pass, 2676 .probe = &qed_probe, 2677 .remove = &qed_remove, 2678 .set_power_state = &qed_set_power_state, 2679 .set_name = &qed_set_name, 2680 .update_pf_params = &qed_update_pf_params, 2681 .slowpath_start = &qed_slowpath_start, 2682 .slowpath_stop = &qed_slowpath_stop, 2683 .set_fp_int = &qed_set_int_fp, 2684 .get_fp_int = &qed_get_int_fp, 2685 .sb_init = &qed_sb_init, 2686 .sb_release = &qed_sb_release, 2687 .simd_handler_config = &qed_simd_handler_config, 2688 .simd_handler_clean = &qed_simd_handler_clean, 2689 .dbg_grc = &qed_dbg_grc, 2690 .dbg_grc_size = &qed_dbg_grc_size, 2691 .can_link_change = &qed_can_link_change, 2692 .set_link = &qed_set_link, 2693 .get_link = &qed_get_current_link, 2694 .drain = &qed_drain, 2695 .update_msglvl = &qed_init_dp, 2696 .dbg_all_data = &qed_dbg_all_data, 2697 .dbg_all_data_size = &qed_dbg_all_data_size, 2698 .chain_alloc = &qed_chain_alloc, 2699 .chain_free = &qed_chain_free, 2700 .nvm_flash = &qed_nvm_flash, 2701 .nvm_get_image = &qed_nvm_get_image, 2702 .set_coalesce = &qed_set_coalesce, 2703 .set_led = &qed_set_led, 2704 .recovery_process = &qed_recovery_process, 2705 .recovery_prolog = &qed_recovery_prolog, 2706 .attn_clr_enable = &qed_int_attn_clr_enable, 2707 .update_drv_state = &qed_update_drv_state, 2708 .update_mac = &qed_update_mac, 2709 .update_mtu = &qed_update_mtu, 2710 .update_wol = &qed_update_wol, 2711 .db_recovery_add = &qed_db_recovery_add, 2712 .db_recovery_del = &qed_db_recovery_del, 2713 .read_module_eeprom = &qed_read_module_eeprom, 2714 .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, 2715 .read_nvm_cfg = &qed_nvm_flash_cfg_read, 2716 .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, 2717 .set_grc_config = &qed_set_grc_config, 2718 }; 2719 2720 void qed_get_protocol_stats(struct qed_dev *cdev, 2721 enum qed_mcp_protocol_type type, 2722 union qed_mcp_protocol_stats *stats) 2723 { 2724 struct qed_eth_stats eth_stats; 2725 2726 memset(stats, 0, sizeof(*stats)); 2727 2728 switch (type) { 2729 case QED_MCP_LAN_STATS: 2730 qed_get_vport_stats(cdev, ð_stats); 2731 stats->lan_stats.ucast_rx_pkts = 2732 eth_stats.common.rx_ucast_pkts; 2733 stats->lan_stats.ucast_tx_pkts = 2734 eth_stats.common.tx_ucast_pkts; 2735 stats->lan_stats.fcs_err = -1; 2736 break; 2737 case QED_MCP_FCOE_STATS: 2738 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); 2739 break; 2740 case QED_MCP_ISCSI_STATS: 2741 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 2742 break; 2743 default: 2744 DP_VERBOSE(cdev, QED_MSG_SP, 2745 "Invalid protocol type = %d\n", type); 2746 return; 2747 } 2748 } 2749 2750 int qed_mfw_tlv_req(struct qed_hwfn *hwfn) 2751 { 2752 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 2753 "Scheduling slowpath task [Flag: %d]\n", 2754 QED_SLOWPATH_MFW_TLV_REQ); 2755 smp_mb__before_atomic(); 2756 set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); 2757 smp_mb__after_atomic(); 2758 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); 2759 2760 return 0; 2761 } 2762 2763 static void 2764 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) 2765 { 2766 struct qed_common_cb_ops *op = cdev->protocol_ops.common; 2767 struct qed_eth_stats_common *p_common; 2768 struct qed_generic_tlvs gen_tlvs; 2769 struct qed_eth_stats stats; 2770 int i; 2771 2772 memset(&gen_tlvs, 0, sizeof(gen_tlvs)); 2773 op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); 2774 2775 if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) 2776 tlv->flags.ipv4_csum_offload = true; 2777 if (gen_tlvs.feat_flags & QED_TLV_LSO) 2778 tlv->flags.lso_supported = true; 2779 tlv->flags.b_set = true; 2780 2781 for (i = 0; i < QED_TLV_MAC_COUNT; i++) { 2782 if (is_valid_ether_addr(gen_tlvs.mac[i])) { 2783 ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); 2784 tlv->mac_set[i] = true; 2785 } 2786 } 2787 2788 qed_get_vport_stats(cdev, &stats); 2789 p_common = &stats.common; 2790 tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 2791 p_common->rx_bcast_pkts; 2792 tlv->rx_frames_set = true; 2793 tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + 2794 p_common->rx_bcast_bytes; 2795 tlv->rx_bytes_set = true; 2796 tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + 2797 p_common->tx_bcast_pkts; 2798 tlv->tx_frames_set = true; 2799 tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + 2800 p_common->tx_bcast_bytes; 2801 tlv->rx_bytes_set = true; 2802 } 2803 2804 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, 2805 union qed_mfw_tlv_data *tlv_buf) 2806 { 2807 struct qed_dev *cdev = hwfn->cdev; 2808 struct qed_common_cb_ops *ops; 2809 2810 ops = cdev->protocol_ops.common; 2811 if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { 2812 DP_NOTICE(hwfn, "Can't collect TLV management info\n"); 2813 return -EINVAL; 2814 } 2815 2816 switch (type) { 2817 case QED_MFW_TLV_GENERIC: 2818 qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); 2819 break; 2820 case QED_MFW_TLV_ETH: 2821 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); 2822 break; 2823 case QED_MFW_TLV_FCOE: 2824 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); 2825 break; 2826 case QED_MFW_TLV_ISCSI: 2827 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); 2828 break; 2829 default: 2830 break; 2831 } 2832 2833 return 0; 2834 } 2835