/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 304 &dev_info->mbi_version); 305 306 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 307 &dev_info->flash_size); 308 309 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 310 } 311 } else { 312 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 313 &dev_info->mfw_rev, NULL); 314 } 315 316 dev_info->mtu = hw_info->mtu; 317 318 return 0; 319 } 320 321 static void qed_free_cdev(struct qed_dev *cdev) 322 { 323 kfree((void *)cdev); 324 } 325 326 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 327 { 328 struct qed_dev *cdev; 329 330 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 331 if (!cdev) 332 return cdev; 333 334 qed_init_struct(cdev); 335 336 return cdev; 337 } 338 339 /* Sets the requested power state */ 340 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 341 { 342 if (!cdev) 343 return -ENODEV; 344 345 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 346 return 0; 347 } 348 349 struct qed_devlink { 350 struct qed_dev *cdev; 351 }; 352 353 enum qed_devlink_param_id { 354 QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, 355 QED_DEVLINK_PARAM_ID_IWARP_CMT, 356 }; 357 358 static int qed_dl_param_get(struct devlink *dl, u32 id, 359 struct devlink_param_gset_ctx *ctx) 360 { 361 struct qed_devlink *qed_dl; 362 struct qed_dev *cdev; 363 364 qed_dl = devlink_priv(dl); 365 cdev = qed_dl->cdev; 366 ctx->val.vbool = cdev->iwarp_cmt; 367 368 return 0; 369 } 370 371 static int qed_dl_param_set(struct devlink *dl, u32 id, 372 struct devlink_param_gset_ctx *ctx) 373 { 374 struct qed_devlink *qed_dl; 375 struct qed_dev *cdev; 376 377 qed_dl = devlink_priv(dl); 378 cdev = qed_dl->cdev; 379 cdev->iwarp_cmt = ctx->val.vbool; 380 381 return 0; 382 } 383 384 static const struct devlink_param qed_devlink_params[] = { 385 DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT, 386 "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL, 387 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 388 qed_dl_param_get, qed_dl_param_set, NULL), 389 }; 390 391 static const struct devlink_ops qed_dl_ops; 392 393 static int qed_devlink_register(struct qed_dev *cdev) 394 { 395 union devlink_param_value value; 396 struct qed_devlink *qed_dl; 397 struct devlink *dl; 398 int rc; 399 400 dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl)); 401 if (!dl) 402 return -ENOMEM; 403 404 qed_dl = devlink_priv(dl); 405 406 cdev->dl = dl; 407 qed_dl->cdev = cdev; 408 409 rc = devlink_register(dl, &cdev->pdev->dev); 410 if (rc) 411 goto err_free; 412 413 rc = devlink_params_register(dl, qed_devlink_params, 414 ARRAY_SIZE(qed_devlink_params)); 415 if (rc) 416 goto err_unregister; 417 418 value.vbool = false; 419 devlink_param_driverinit_value_set(dl, 420 QED_DEVLINK_PARAM_ID_IWARP_CMT, 421 value); 422 423 devlink_params_publish(dl); 424 cdev->iwarp_cmt = false; 425 426 return 0; 427 428 err_unregister: 429 devlink_unregister(dl); 430 431 err_free: 432 cdev->dl = NULL; 433 devlink_free(dl); 434 435 return rc; 436 } 437 438 static void qed_devlink_unregister(struct qed_dev *cdev) 439 { 440 if (!cdev->dl) 441 return; 442 443 devlink_params_unregister(cdev->dl, qed_devlink_params, 444 ARRAY_SIZE(qed_devlink_params)); 445 446 devlink_unregister(cdev->dl); 447 devlink_free(cdev->dl); 448 } 449 450 /* probing */ 451 static struct qed_dev *qed_probe(struct pci_dev *pdev, 452 struct qed_probe_params *params) 453 { 454 struct qed_dev *cdev; 455 int rc; 456 457 cdev = qed_alloc_cdev(pdev); 458 if (!cdev) 459 goto err0; 460 461 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 462 cdev->protocol = 
params->protocol; 463 464 if (params->is_vf) 465 cdev->b_is_vf = true; 466 467 qed_init_dp(cdev, params->dp_module, params->dp_level); 468 469 cdev->recov_in_prog = params->recov_in_prog; 470 471 rc = qed_init_pci(cdev, pdev); 472 if (rc) { 473 DP_ERR(cdev, "init pci failed\n"); 474 goto err1; 475 } 476 DP_INFO(cdev, "PCI init completed successfully\n"); 477 478 rc = qed_devlink_register(cdev); 479 if (rc) { 480 DP_INFO(cdev, "Failed to register devlink.\n"); 481 goto err2; 482 } 483 484 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 485 if (rc) { 486 DP_ERR(cdev, "hw prepare failed\n"); 487 goto err2; 488 } 489 490 DP_INFO(cdev, "qed_probe completed successfully\n"); 491 492 return cdev; 493 494 err2: 495 qed_free_pci(cdev); 496 err1: 497 qed_free_cdev(cdev); 498 err0: 499 return NULL; 500 } 501 502 static void qed_remove(struct qed_dev *cdev) 503 { 504 if (!cdev) 505 return; 506 507 qed_hw_remove(cdev); 508 509 qed_free_pci(cdev); 510 511 qed_set_power_state(cdev, PCI_D3hot); 512 513 qed_devlink_unregister(cdev); 514 515 qed_free_cdev(cdev); 516 } 517 518 static void qed_disable_msix(struct qed_dev *cdev) 519 { 520 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 521 pci_disable_msix(cdev->pdev); 522 kfree(cdev->int_params.msix_table); 523 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 524 pci_disable_msi(cdev->pdev); 525 } 526 527 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 528 } 529 530 static int qed_enable_msix(struct qed_dev *cdev, 531 struct qed_int_params *int_params) 532 { 533 int i, rc, cnt; 534 535 cnt = int_params->in.num_vectors; 536 537 for (i = 0; i < cnt; i++) 538 int_params->msix_table[i].entry = i; 539 540 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 541 int_params->in.min_msix_cnt, cnt); 542 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 543 (rc % cdev->num_hwfns)) { 544 pci_disable_msix(cdev->pdev); 545 546 /* If fastpath is initialized, we need at least one interrupt 547 * per hwfn [and the slow path interrupts]. New requested number 548 * should be a multiple of the number of hwfns. 
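		 * (The arithmetic below rounds the granted vector count down
		 *  to such a multiple before retrying with
		 *  pci_enable_msix_exact().)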
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with fewer vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
631 "MSI" : "MSIX"); 632 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 633 634 return rc; 635 } 636 637 static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 638 int index, void(*handler)(void *)) 639 { 640 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 641 int relative_idx = index / cdev->num_hwfns; 642 643 hwfn->simd_proto_handler[relative_idx].func = handler; 644 hwfn->simd_proto_handler[relative_idx].token = token; 645 } 646 647 static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 648 { 649 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 650 int relative_idx = index / cdev->num_hwfns; 651 652 memset(&hwfn->simd_proto_handler[relative_idx], 0, 653 sizeof(struct qed_simd_fp_handler)); 654 } 655 656 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 657 { 658 tasklet_schedule((struct tasklet_struct *)tasklet); 659 return IRQ_HANDLED; 660 } 661 662 static irqreturn_t qed_single_int(int irq, void *dev_instance) 663 { 664 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 665 struct qed_hwfn *hwfn; 666 irqreturn_t rc = IRQ_NONE; 667 u64 status; 668 int i, j; 669 670 for (i = 0; i < cdev->num_hwfns; i++) { 671 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 672 673 if (!status) 674 continue; 675 676 hwfn = &cdev->hwfns[i]; 677 678 /* Slowpath interrupt */ 679 if (unlikely(status & 0x1)) { 680 tasklet_schedule(hwfn->sp_dpc); 681 status &= ~0x1; 682 rc = IRQ_HANDLED; 683 } 684 685 /* Fastpath interrupts */ 686 for (j = 0; j < 64; j++) { 687 if ((0x2ULL << j) & status) { 688 struct qed_simd_fp_handler *p_handler = 689 &hwfn->simd_proto_handler[j]; 690 691 if (p_handler->func) 692 p_handler->func(p_handler->token); 693 else 694 DP_NOTICE(hwfn, 695 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", 696 j, status); 697 698 status &= ~(0x2ULL << j); 699 rc = IRQ_HANDLED; 700 } 701 } 702 703 if (unlikely(status)) 704 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 705 "got an unknown interrupt status 0x%llx\n", 706 status); 707 } 708 709 return rc; 710 } 711 712 int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 713 { 714 struct qed_dev *cdev = hwfn->cdev; 715 u32 int_mode; 716 int rc = 0; 717 u8 id; 718 719 int_mode = cdev->int_params.out.int_mode; 720 if (int_mode == QED_INT_MODE_MSIX) { 721 id = hwfn->my_id; 722 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 723 id, cdev->pdev->bus->number, 724 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 725 rc = request_irq(cdev->int_params.msix_table[id].vector, 726 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); 727 } else { 728 unsigned long flags = 0; 729 730 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 731 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 732 PCI_FUNC(cdev->pdev->devfn)); 733 734 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 735 flags |= IRQF_SHARED; 736 737 rc = request_irq(cdev->pdev->irq, qed_single_int, 738 flags, cdev->name, cdev); 739 } 740 741 if (rc) 742 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 743 else 744 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 745 "Requested slowpath %s\n", 746 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); 747 748 return rc; 749 } 750 751 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) 752 { 753 /* Calling the disable function will make sure that any 754 * currently-running function is completed. The following call to the 755 * enable function makes this sequence a flush-like operation. 
756 */ 757 if (p_hwfn->b_sp_dpc_enabled) { 758 tasklet_disable(p_hwfn->sp_dpc); 759 tasklet_enable(p_hwfn->sp_dpc); 760 } 761 } 762 763 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 764 { 765 struct qed_dev *cdev = p_hwfn->cdev; 766 u8 id = p_hwfn->my_id; 767 u32 int_mode; 768 769 int_mode = cdev->int_params.out.int_mode; 770 if (int_mode == QED_INT_MODE_MSIX) 771 synchronize_irq(cdev->int_params.msix_table[id].vector); 772 else 773 synchronize_irq(cdev->pdev->irq); 774 775 qed_slowpath_tasklet_flush(p_hwfn); 776 } 777 778 static void qed_slowpath_irq_free(struct qed_dev *cdev) 779 { 780 int i; 781 782 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 783 for_each_hwfn(cdev, i) { 784 if (!cdev->hwfns[i].b_int_requested) 785 break; 786 synchronize_irq(cdev->int_params.msix_table[i].vector); 787 free_irq(cdev->int_params.msix_table[i].vector, 788 cdev->hwfns[i].sp_dpc); 789 } 790 } else { 791 if (QED_LEADING_HWFN(cdev)->b_int_requested) 792 free_irq(cdev->pdev->irq, cdev); 793 } 794 qed_int_disable_post_isr_release(cdev); 795 } 796 797 static int qed_nic_stop(struct qed_dev *cdev) 798 { 799 int i, rc; 800 801 rc = qed_hw_stop(cdev); 802 803 for (i = 0; i < cdev->num_hwfns; i++) { 804 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 805 806 if (p_hwfn->b_sp_dpc_enabled) { 807 tasklet_disable(p_hwfn->sp_dpc); 808 p_hwfn->b_sp_dpc_enabled = false; 809 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 810 "Disabled sp tasklet [hwfn %d] at %p\n", 811 i, p_hwfn->sp_dpc); 812 } 813 } 814 815 qed_dbg_pf_exit(cdev); 816 817 return rc; 818 } 819 820 static int qed_nic_setup(struct qed_dev *cdev) 821 { 822 int rc, i; 823 824 /* Determine if interface is going to require LL2 */ 825 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 826 for (i = 0; i < cdev->num_hwfns; i++) { 827 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 828 829 p_hwfn->using_ll2 = true; 830 } 831 } 832 833 rc = qed_resc_alloc(cdev); 834 if (rc) 835 return rc; 836 837 DP_INFO(cdev, "Allocated qed resources\n"); 838 839 qed_resc_setup(cdev); 840 841 return rc; 842 } 843 844 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 845 { 846 int limit = 0; 847 848 /* Mark the fastpath as free/used */ 849 cdev->int_params.fp_initialized = cnt ? true : false; 850 851 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 852 limit = cdev->num_hwfns * 63; 853 else if (cdev->int_params.fp_msix_cnt) 854 limit = cdev->int_params.fp_msix_cnt; 855 856 if (!limit) 857 return -ENOMEM; 858 859 return min_t(int, cnt, limit); 860 } 861 862 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 863 { 864 memset(info, 0, sizeof(struct qed_int_info)); 865 866 if (!cdev->int_params.fp_initialized) { 867 DP_INFO(cdev, 868 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 869 return -EINVAL; 870 } 871 872 /* Need to expose only MSI-X information; Single IRQ is handled solely 873 * by qed. 
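	 * (In INTa/MSI mode qed requests the single IRQ itself in
	 *  qed_slowpath_irq_req() and demultiplexes it in qed_single_int().)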
874 */ 875 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 876 int msix_base = cdev->int_params.fp_msix_base; 877 878 info->msix_cnt = cdev->int_params.fp_msix_cnt; 879 info->msix = &cdev->int_params.msix_table[msix_base]; 880 } 881 882 return 0; 883 } 884 885 static int qed_slowpath_setup_int(struct qed_dev *cdev, 886 enum qed_int_mode int_mode) 887 { 888 struct qed_sb_cnt_info sb_cnt_info; 889 int num_l2_queues = 0; 890 int rc; 891 int i; 892 893 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 894 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 895 return -EINVAL; 896 } 897 898 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 899 cdev->int_params.in.int_mode = int_mode; 900 for_each_hwfn(cdev, i) { 901 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 902 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 903 cdev->int_params.in.num_vectors += sb_cnt_info.cnt; 904 cdev->int_params.in.num_vectors++; /* slowpath */ 905 } 906 907 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 908 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 909 910 if (is_kdump_kernel()) { 911 DP_INFO(cdev, 912 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 913 cdev->int_params.in.min_msix_cnt); 914 cdev->int_params.in.num_vectors = 915 cdev->int_params.in.min_msix_cnt; 916 } 917 918 rc = qed_set_int_mode(cdev, false); 919 if (rc) { 920 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 921 return rc; 922 } 923 924 cdev->int_params.fp_msix_base = cdev->num_hwfns; 925 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 926 cdev->num_hwfns; 927 928 if (!IS_ENABLED(CONFIG_QED_RDMA) || 929 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 930 return 0; 931 932 for_each_hwfn(cdev, i) 933 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 934 935 DP_VERBOSE(cdev, QED_MSG_RDMA, 936 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 937 cdev->int_params.fp_msix_cnt, num_l2_queues); 938 939 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 940 cdev->int_params.rdma_msix_cnt = 941 (cdev->int_params.fp_msix_cnt - num_l2_queues) 942 / cdev->num_hwfns; 943 cdev->int_params.rdma_msix_base = 944 cdev->int_params.fp_msix_base + num_l2_queues; 945 cdev->int_params.fp_msix_cnt = num_l2_queues; 946 } else { 947 cdev->int_params.rdma_msix_cnt = 0; 948 } 949 950 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 951 cdev->int_params.rdma_msix_cnt, 952 cdev->int_params.rdma_msix_base); 953 954 return 0; 955 } 956 957 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 958 { 959 int rc; 960 961 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 962 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 963 964 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 965 &cdev->int_params.in.num_vectors); 966 if (cdev->num_hwfns > 1) { 967 u8 vectors = 0; 968 969 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 970 cdev->int_params.in.num_vectors += vectors; 971 } 972 973 /* We want a minimum of one fastpath vector per vf hwfn */ 974 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 975 976 rc = qed_set_int_mode(cdev, true); 977 if (rc) 978 return rc; 979 980 cdev->int_params.fp_msix_base = 0; 981 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 982 983 return 0; 984 } 985 986 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 987 u8 *input_buf, u32 max_size, u8 *unzip_buf) 988 { 989 int rc; 990 991 p_hwfn->stream->next_in = input_buf; 992 p_hwfn->stream->avail_in = 
input_len; 993 p_hwfn->stream->next_out = unzip_buf; 994 p_hwfn->stream->avail_out = max_size; 995 996 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 997 998 if (rc != Z_OK) { 999 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 1000 rc); 1001 return 0; 1002 } 1003 1004 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 1005 zlib_inflateEnd(p_hwfn->stream); 1006 1007 if (rc != Z_OK && rc != Z_STREAM_END) { 1008 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 1009 p_hwfn->stream->msg, rc); 1010 return 0; 1011 } 1012 1013 return p_hwfn->stream->total_out / 4; 1014 } 1015 1016 static int qed_alloc_stream_mem(struct qed_dev *cdev) 1017 { 1018 int i; 1019 void *workspace; 1020 1021 for_each_hwfn(cdev, i) { 1022 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1023 1024 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 1025 if (!p_hwfn->stream) 1026 return -ENOMEM; 1027 1028 workspace = vzalloc(zlib_inflate_workspacesize()); 1029 if (!workspace) 1030 return -ENOMEM; 1031 p_hwfn->stream->workspace = workspace; 1032 } 1033 1034 return 0; 1035 } 1036 1037 static void qed_free_stream_mem(struct qed_dev *cdev) 1038 { 1039 int i; 1040 1041 for_each_hwfn(cdev, i) { 1042 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1043 1044 if (!p_hwfn->stream) 1045 return; 1046 1047 vfree(p_hwfn->stream->workspace); 1048 kfree(p_hwfn->stream); 1049 } 1050 } 1051 1052 static void qed_update_pf_params(struct qed_dev *cdev, 1053 struct qed_pf_params *params) 1054 { 1055 int i; 1056 1057 if (IS_ENABLED(CONFIG_QED_RDMA)) { 1058 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 1059 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 1060 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 1061 /* divide by 3 the MRs to avoid MF ILT overflow */ 1062 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 1063 } 1064 1065 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 1066 params->eth_pf_params.num_arfs_filters = 0; 1067 1068 /* In case we might support RDMA, don't allow qede to be greedy 1069 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 1070 * per hwfn. 
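	 * (QED_MAX_L2_CONS below is the cap that enforces this budget.)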
1071 */ 1072 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 1073 u16 *num_cons; 1074 1075 num_cons = ¶ms->eth_pf_params.num_cons; 1076 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 1077 } 1078 1079 for (i = 0; i < cdev->num_hwfns; i++) { 1080 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1081 1082 p_hwfn->pf_params = *params; 1083 } 1084 } 1085 1086 #define QED_PERIODIC_DB_REC_COUNT 10 1087 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 1088 #define QED_PERIODIC_DB_REC_INTERVAL \ 1089 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 1090 #define QED_PERIODIC_DB_REC_WAIT_COUNT 10 1091 #define QED_PERIODIC_DB_REC_WAIT_INTERVAL \ 1092 (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT) 1093 1094 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, 1095 enum qed_slowpath_wq_flag wq_flag, 1096 unsigned long delay) 1097 { 1098 if (!hwfn->slowpath_wq_active) 1099 return -EINVAL; 1100 1101 /* Memory barrier for setting atomic bit */ 1102 smp_mb__before_atomic(); 1103 set_bit(wq_flag, &hwfn->slowpath_task_flags); 1104 smp_mb__after_atomic(); 1105 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); 1106 1107 return 0; 1108 } 1109 1110 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) 1111 { 1112 /* Reset periodic Doorbell Recovery counter */ 1113 p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; 1114 1115 /* Don't schedule periodic Doorbell Recovery if already scheduled */ 1116 if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1117 &p_hwfn->slowpath_task_flags)) 1118 return; 1119 1120 qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, 1121 QED_PERIODIC_DB_REC_INTERVAL); 1122 } 1123 1124 static void qed_slowpath_wq_stop(struct qed_dev *cdev) 1125 { 1126 int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT; 1127 1128 if (IS_VF(cdev)) 1129 return; 1130 1131 for_each_hwfn(cdev, i) { 1132 if (!cdev->hwfns[i].slowpath_wq) 1133 continue; 1134 1135 /* Stop queuing new delayed works */ 1136 cdev->hwfns[i].slowpath_wq_active = false; 1137 1138 /* Wait until the last periodic doorbell recovery is executed */ 1139 while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1140 &cdev->hwfns[i].slowpath_task_flags) && 1141 sleep_count--) 1142 msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL); 1143 1144 flush_workqueue(cdev->hwfns[i].slowpath_wq); 1145 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 1146 } 1147 } 1148 1149 static void qed_slowpath_task(struct work_struct *work) 1150 { 1151 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 1152 slowpath_task.work); 1153 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 1154 1155 if (!ptt) { 1156 if (hwfn->slowpath_wq_active) 1157 queue_delayed_work(hwfn->slowpath_wq, 1158 &hwfn->slowpath_task, 0); 1159 1160 return; 1161 } 1162 1163 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 1164 &hwfn->slowpath_task_flags)) 1165 qed_mfw_process_tlv_req(hwfn, ptt); 1166 1167 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1168 &hwfn->slowpath_task_flags)) { 1169 qed_db_rec_handler(hwfn, ptt); 1170 if (hwfn->periodic_db_rec_count--) 1171 qed_slowpath_delayed_work(hwfn, 1172 QED_SLOWPATH_PERIODIC_DB_REC, 1173 QED_PERIODIC_DB_REC_INTERVAL); 1174 } 1175 1176 qed_ptt_release(hwfn, ptt); 1177 } 1178 1179 static int qed_slowpath_wq_start(struct qed_dev *cdev) 1180 { 1181 struct qed_hwfn *hwfn; 1182 char name[NAME_SIZE]; 1183 int i; 1184 1185 if (IS_VF(cdev)) 1186 return 0; 1187 1188 for_each_hwfn(cdev, i) { 1189 hwfn = &cdev->hwfns[i]; 1190 1191 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", 1192 cdev->pdev->bus->number, 
1193 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 1194 1195 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); 1196 if (!hwfn->slowpath_wq) { 1197 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1198 return -ENOMEM; 1199 } 1200 1201 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1202 hwfn->slowpath_wq_active = true; 1203 } 1204 1205 return 0; 1206 } 1207 1208 static int qed_slowpath_start(struct qed_dev *cdev, 1209 struct qed_slowpath_params *params) 1210 { 1211 struct qed_drv_load_params drv_load_params; 1212 struct qed_hw_init_params hw_init_params; 1213 struct qed_mcp_drv_version drv_version; 1214 struct qed_tunnel_info tunn_info; 1215 const u8 *data = NULL; 1216 struct qed_hwfn *hwfn; 1217 struct qed_ptt *p_ptt; 1218 int rc = -EINVAL; 1219 1220 if (qed_iov_wq_start(cdev)) 1221 goto err; 1222 1223 if (qed_slowpath_wq_start(cdev)) 1224 goto err; 1225 1226 if (IS_PF(cdev)) { 1227 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1228 &cdev->pdev->dev); 1229 if (rc) { 1230 DP_NOTICE(cdev, 1231 "Failed to find fw file - /lib/firmware/%s\n", 1232 QED_FW_FILE_NAME); 1233 goto err; 1234 } 1235 1236 if (cdev->num_hwfns == 1) { 1237 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1238 if (p_ptt) { 1239 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1240 } else { 1241 DP_NOTICE(cdev, 1242 "Failed to acquire PTT for aRFS\n"); 1243 goto err; 1244 } 1245 } 1246 } 1247 1248 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1249 rc = qed_nic_setup(cdev); 1250 if (rc) 1251 goto err; 1252 1253 if (IS_PF(cdev)) 1254 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1255 else 1256 rc = qed_slowpath_vf_setup_int(cdev); 1257 if (rc) 1258 goto err1; 1259 1260 if (IS_PF(cdev)) { 1261 /* Allocate stream for unzipping */ 1262 rc = qed_alloc_stream_mem(cdev); 1263 if (rc) 1264 goto err2; 1265 1266 /* First Dword used to differentiate between various sources */ 1267 data = cdev->firmware->data + sizeof(u32); 1268 1269 qed_dbg_pf_init(cdev); 1270 } 1271 1272 /* Start the slowpath */ 1273 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1274 memset(&tunn_info, 0, sizeof(tunn_info)); 1275 tunn_info.vxlan.b_mode_enabled = true; 1276 tunn_info.l2_gre.b_mode_enabled = true; 1277 tunn_info.ip_gre.b_mode_enabled = true; 1278 tunn_info.l2_geneve.b_mode_enabled = true; 1279 tunn_info.ip_geneve.b_mode_enabled = true; 1280 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1281 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1282 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1283 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1284 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1285 hw_init_params.p_tunn = &tunn_info; 1286 hw_init_params.b_hw_start = true; 1287 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1288 hw_init_params.allow_npar_tx_switch = true; 1289 hw_init_params.bin_fw_data = data; 1290 1291 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1292 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1293 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1294 drv_load_params.avoid_eng_reset = false; 1295 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1296 hw_init_params.p_drv_load_params = &drv_load_params; 1297 1298 rc = qed_hw_init(cdev, &hw_init_params); 1299 if (rc) 1300 goto err2; 1301 1302 DP_INFO(cdev, 1303 "HW initialization and function start completed successfully\n"); 1304 1305 if (IS_PF(cdev)) { 1306 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1307 BIT(QED_MODE_L2GENEVE_TUNN) | 1308 
BIT(QED_MODE_IPGENEVE_TUNN) | 1309 BIT(QED_MODE_L2GRE_TUNN) | 1310 BIT(QED_MODE_IPGRE_TUNN)); 1311 } 1312 1313 /* Allocate LL2 interface if needed */ 1314 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1315 rc = qed_ll2_alloc_if(cdev); 1316 if (rc) 1317 goto err3; 1318 } 1319 if (IS_PF(cdev)) { 1320 hwfn = QED_LEADING_HWFN(cdev); 1321 drv_version.version = (params->drv_major << 24) | 1322 (params->drv_minor << 16) | 1323 (params->drv_rev << 8) | 1324 (params->drv_eng); 1325 strlcpy(drv_version.name, params->name, 1326 MCP_DRV_VER_STR_SIZE - 4); 1327 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1328 &drv_version); 1329 if (rc) { 1330 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1331 goto err4; 1332 } 1333 } 1334 1335 qed_reset_vport_stats(cdev); 1336 1337 return 0; 1338 1339 err4: 1340 qed_ll2_dealloc_if(cdev); 1341 err3: 1342 qed_hw_stop(cdev); 1343 err2: 1344 qed_hw_timers_stop_all(cdev); 1345 if (IS_PF(cdev)) 1346 qed_slowpath_irq_free(cdev); 1347 qed_free_stream_mem(cdev); 1348 qed_disable_msix(cdev); 1349 err1: 1350 qed_resc_free(cdev); 1351 err: 1352 if (IS_PF(cdev)) 1353 release_firmware(cdev->firmware); 1354 1355 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1356 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1357 qed_ptt_release(QED_LEADING_HWFN(cdev), 1358 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1359 1360 qed_iov_wq_stop(cdev, false); 1361 1362 qed_slowpath_wq_stop(cdev); 1363 1364 return rc; 1365 } 1366 1367 static int qed_slowpath_stop(struct qed_dev *cdev) 1368 { 1369 if (!cdev) 1370 return -ENODEV; 1371 1372 qed_slowpath_wq_stop(cdev); 1373 1374 qed_ll2_dealloc_if(cdev); 1375 1376 if (IS_PF(cdev)) { 1377 if (cdev->num_hwfns == 1) 1378 qed_ptt_release(QED_LEADING_HWFN(cdev), 1379 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1380 qed_free_stream_mem(cdev); 1381 if (IS_QED_ETH_IF(cdev)) 1382 qed_sriov_disable(cdev, true); 1383 } 1384 1385 qed_nic_stop(cdev); 1386 1387 if (IS_PF(cdev)) 1388 qed_slowpath_irq_free(cdev); 1389 1390 qed_disable_msix(cdev); 1391 1392 qed_resc_free(cdev); 1393 1394 qed_iov_wq_stop(cdev, true); 1395 1396 if (IS_PF(cdev)) 1397 release_firmware(cdev->firmware); 1398 1399 return 0; 1400 } 1401 1402 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1403 { 1404 int i; 1405 1406 memcpy(cdev->name, name, NAME_SIZE); 1407 for_each_hwfn(cdev, i) 1408 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1409 } 1410 1411 static u32 qed_sb_init(struct qed_dev *cdev, 1412 struct qed_sb_info *sb_info, 1413 void *sb_virt_addr, 1414 dma_addr_t sb_phy_addr, u16 sb_id, 1415 enum qed_sb_type type) 1416 { 1417 struct qed_hwfn *p_hwfn; 1418 struct qed_ptt *p_ptt; 1419 u16 rel_sb_id; 1420 u32 rc; 1421 1422 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1423 if (type == QED_SB_TYPE_L2_QUEUE) { 1424 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1425 rel_sb_id = sb_id / cdev->num_hwfns; 1426 } else { 1427 p_hwfn = QED_AFFIN_HWFN(cdev); 1428 rel_sb_id = sb_id; 1429 } 1430 1431 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1432 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1433 IS_LEAD_HWFN(p_hwfn) ? 
		   0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	u32 sup_caps;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possibly later] would be generated when changing properties.
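	 * (qed_schedule_iov() with QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG
	 *  defers that bulletin read to the IOV workqueue.)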
1500 */ 1501 if (IS_VF(cdev)) { 1502 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1503 return 0; 1504 } 1505 1506 ptt = qed_ptt_acquire(hwfn); 1507 if (!ptt) 1508 return -EBUSY; 1509 1510 link_params = qed_mcp_get_link_params(hwfn); 1511 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1512 link_params->speed.autoneg = params->autoneg; 1513 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1514 link_params->speed.advertised_speeds = 0; 1515 sup_caps = QED_LM_1000baseT_Full_BIT | 1516 QED_LM_1000baseKX_Full_BIT | 1517 QED_LM_1000baseX_Full_BIT; 1518 if (params->adv_speeds & sup_caps) 1519 link_params->speed.advertised_speeds |= 1520 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 1521 sup_caps = QED_LM_10000baseT_Full_BIT | 1522 QED_LM_10000baseKR_Full_BIT | 1523 QED_LM_10000baseKX4_Full_BIT | 1524 QED_LM_10000baseR_FEC_BIT | 1525 QED_LM_10000baseCR_Full_BIT | 1526 QED_LM_10000baseSR_Full_BIT | 1527 QED_LM_10000baseLR_Full_BIT | 1528 QED_LM_10000baseLRM_Full_BIT; 1529 if (params->adv_speeds & sup_caps) 1530 link_params->speed.advertised_speeds |= 1531 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 1532 if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) 1533 link_params->speed.advertised_speeds |= 1534 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 1535 sup_caps = QED_LM_25000baseKR_Full_BIT | 1536 QED_LM_25000baseCR_Full_BIT | 1537 QED_LM_25000baseSR_Full_BIT; 1538 if (params->adv_speeds & sup_caps) 1539 link_params->speed.advertised_speeds |= 1540 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 1541 sup_caps = QED_LM_40000baseLR4_Full_BIT | 1542 QED_LM_40000baseKR4_Full_BIT | 1543 QED_LM_40000baseCR4_Full_BIT | 1544 QED_LM_40000baseSR4_Full_BIT; 1545 if (params->adv_speeds & sup_caps) 1546 link_params->speed.advertised_speeds |= 1547 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 1548 sup_caps = QED_LM_50000baseKR2_Full_BIT | 1549 QED_LM_50000baseCR2_Full_BIT | 1550 QED_LM_50000baseSR2_Full_BIT; 1551 if (params->adv_speeds & sup_caps) 1552 link_params->speed.advertised_speeds |= 1553 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 1554 sup_caps = QED_LM_100000baseKR4_Full_BIT | 1555 QED_LM_100000baseSR4_Full_BIT | 1556 QED_LM_100000baseCR4_Full_BIT | 1557 QED_LM_100000baseLR4_ER4_Full_BIT; 1558 if (params->adv_speeds & sup_caps) 1559 link_params->speed.advertised_speeds |= 1560 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 1561 } 1562 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1563 link_params->speed.forced_speed = params->forced_speed; 1564 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1565 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1566 link_params->pause.autoneg = true; 1567 else 1568 link_params->pause.autoneg = false; 1569 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1570 link_params->pause.forced_rx = true; 1571 else 1572 link_params->pause.forced_rx = false; 1573 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1574 link_params->pause.forced_tx = true; 1575 else 1576 link_params->pause.forced_tx = false; 1577 } 1578 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1579 switch (params->loopback_mode) { 1580 case QED_LINK_LOOPBACK_INT_PHY: 1581 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1582 break; 1583 case QED_LINK_LOOPBACK_EXT_PHY: 1584 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1585 break; 1586 case QED_LINK_LOOPBACK_EXT: 1587 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1588 break; 1589 case QED_LINK_LOOPBACK_MAC: 1590 
link_params->loopback_mode = ETH_LOOPBACK_MAC; 1591 break; 1592 default: 1593 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1594 break; 1595 } 1596 } 1597 1598 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1599 memcpy(&link_params->eee, ¶ms->eee, 1600 sizeof(link_params->eee)); 1601 1602 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1603 1604 qed_ptt_release(hwfn, ptt); 1605 1606 return rc; 1607 } 1608 1609 static int qed_get_port_type(u32 media_type) 1610 { 1611 int port_type; 1612 1613 switch (media_type) { 1614 case MEDIA_SFPP_10G_FIBER: 1615 case MEDIA_SFP_1G_FIBER: 1616 case MEDIA_XFP_FIBER: 1617 case MEDIA_MODULE_FIBER: 1618 case MEDIA_KR: 1619 port_type = PORT_FIBRE; 1620 break; 1621 case MEDIA_DA_TWINAX: 1622 port_type = PORT_DA; 1623 break; 1624 case MEDIA_BASE_T: 1625 port_type = PORT_TP; 1626 break; 1627 case MEDIA_NOT_PRESENT: 1628 port_type = PORT_NONE; 1629 break; 1630 case MEDIA_UNSPECIFIED: 1631 default: 1632 port_type = PORT_OTHER; 1633 break; 1634 } 1635 return port_type; 1636 } 1637 1638 static int qed_get_link_data(struct qed_hwfn *hwfn, 1639 struct qed_mcp_link_params *params, 1640 struct qed_mcp_link_state *link, 1641 struct qed_mcp_link_capabilities *link_caps) 1642 { 1643 void *p; 1644 1645 if (!IS_PF(hwfn->cdev)) { 1646 qed_vf_get_link_params(hwfn, params); 1647 qed_vf_get_link_state(hwfn, link); 1648 qed_vf_get_link_caps(hwfn, link_caps); 1649 1650 return 0; 1651 } 1652 1653 p = qed_mcp_get_link_params(hwfn); 1654 if (!p) 1655 return -ENXIO; 1656 memcpy(params, p, sizeof(*params)); 1657 1658 p = qed_mcp_get_link_state(hwfn); 1659 if (!p) 1660 return -ENXIO; 1661 memcpy(link, p, sizeof(*link)); 1662 1663 p = qed_mcp_get_link_capabilities(hwfn); 1664 if (!p) 1665 return -ENXIO; 1666 memcpy(link_caps, p, sizeof(*link_caps)); 1667 1668 return 0; 1669 } 1670 1671 static void qed_fill_link_capability(struct qed_hwfn *hwfn, 1672 struct qed_ptt *ptt, u32 capability, 1673 u32 *if_capability) 1674 { 1675 u32 media_type, tcvr_state, tcvr_type; 1676 u32 speed_mask, board_cfg; 1677 1678 if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) 1679 media_type = MEDIA_UNSPECIFIED; 1680 1681 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) 1682 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; 1683 1684 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) 1685 speed_mask = 0xFFFFFFFF; 1686 1687 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) 1688 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 1689 1690 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 1691 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", 1692 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); 1693 1694 switch (media_type) { 1695 case MEDIA_DA_TWINAX: 1696 *if_capability |= QED_LM_FIBRE_BIT; 1697 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1698 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1699 /* For DAC media multiple speed capabilities are supported*/ 1700 capability = capability & speed_mask; 1701 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1702 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1703 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1704 *if_capability |= QED_LM_10000baseCR_Full_BIT; 1705 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1706 *if_capability |= QED_LM_40000baseCR4_Full_BIT; 1707 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1708 *if_capability |= QED_LM_25000baseCR_Full_BIT; 1709 if (capability & 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1710 *if_capability |= QED_LM_50000baseCR2_Full_BIT; 1711 if (capability & 1712 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1713 *if_capability |= QED_LM_100000baseCR4_Full_BIT; 1714 break; 1715 case MEDIA_BASE_T: 1716 *if_capability |= QED_LM_TP_BIT; 1717 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { 1718 if (capability & 1719 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { 1720 *if_capability |= QED_LM_1000baseT_Full_BIT; 1721 } 1722 if (capability & 1723 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { 1724 *if_capability |= QED_LM_10000baseT_Full_BIT; 1725 } 1726 } 1727 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { 1728 *if_capability |= QED_LM_FIBRE_BIT; 1729 if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) 1730 *if_capability |= QED_LM_1000baseT_Full_BIT; 1731 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) 1732 *if_capability |= QED_LM_10000baseT_Full_BIT; 1733 } 1734 break; 1735 case MEDIA_SFP_1G_FIBER: 1736 case MEDIA_SFPP_10G_FIBER: 1737 case MEDIA_XFP_FIBER: 1738 case MEDIA_MODULE_FIBER: 1739 *if_capability |= QED_LM_FIBRE_BIT; 1740 if (capability & 1741 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { 1742 if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || 1743 (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX)) 1744 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1745 } 1746 if (capability & 1747 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { 1748 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR) 1749 *if_capability |= QED_LM_10000baseSR_Full_BIT; 1750 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR) 1751 *if_capability |= QED_LM_10000baseLR_Full_BIT; 1752 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM) 1753 *if_capability |= QED_LM_10000baseLRM_Full_BIT; 1754 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER) 1755 *if_capability |= QED_LM_10000baseR_FEC_BIT; 1756 } 1757 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1758 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1759 if (capability & 1760 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { 1761 if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR) 1762 *if_capability |= QED_LM_25000baseSR_Full_BIT; 1763 } 1764 if (capability & 1765 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { 1766 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4) 1767 *if_capability |= QED_LM_40000baseLR4_Full_BIT; 1768 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4) 1769 *if_capability |= QED_LM_40000baseSR4_Full_BIT; 1770 } 1771 if (capability & 1772 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1773 *if_capability |= QED_LM_50000baseKR2_Full_BIT; 1774 if (capability & 1775 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) { 1776 if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4) 1777 *if_capability |= QED_LM_100000baseSR4_Full_BIT; 1778 } 1779 1780 break; 1781 case MEDIA_KR: 1782 *if_capability |= QED_LM_Backplane_BIT; 1783 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1784 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1785 if (capability & 1786 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1787 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1788 if (capability & 1789 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1790 *if_capability |= QED_LM_10000baseKR_Full_BIT; 1791 if (capability & 1792 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1793 *if_capability |= QED_LM_25000baseKR_Full_BIT; 1794 if (capability & 1795 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1796 *if_capability |= QED_LM_40000baseKR4_Full_BIT; 1797 if (capability & 1798 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1799 *if_capability |= 
QED_LM_50000baseKR2_Full_BIT; 1800 if (capability & 1801 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1802 *if_capability |= QED_LM_100000baseKR4_Full_BIT; 1803 break; 1804 case MEDIA_UNSPECIFIED: 1805 case MEDIA_NOT_PRESENT: 1806 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, 1807 "Unknown media and transceiver type;\n"); 1808 break; 1809 } 1810 } 1811 1812 static void qed_fill_link(struct qed_hwfn *hwfn, 1813 struct qed_ptt *ptt, 1814 struct qed_link_output *if_link) 1815 { 1816 struct qed_mcp_link_capabilities link_caps; 1817 struct qed_mcp_link_params params; 1818 struct qed_mcp_link_state link; 1819 u32 media_type; 1820 1821 memset(if_link, 0, sizeof(*if_link)); 1822 1823 /* Prepare source inputs */ 1824 if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { 1825 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 1826 return; 1827 } 1828 1829 /* Set the link parameters to pass to protocol driver */ 1830 if (link.link_up) 1831 if_link->link_up = true; 1832 1833 /* TODO - at the moment assume supported and advertised speed equal */ 1834 if (link_caps.default_speed_autoneg) 1835 if_link->supported_caps |= QED_LM_Autoneg_BIT; 1836 if (params.pause.autoneg || 1837 (params.pause.forced_rx && params.pause.forced_tx)) 1838 if_link->supported_caps |= QED_LM_Asym_Pause_BIT; 1839 if (params.pause.autoneg || params.pause.forced_rx || 1840 params.pause.forced_tx) 1841 if_link->supported_caps |= QED_LM_Pause_BIT; 1842 1843 if_link->advertised_caps = if_link->supported_caps; 1844 if (params.speed.autoneg) 1845 if_link->advertised_caps |= QED_LM_Autoneg_BIT; 1846 else 1847 if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; 1848 1849 /* Fill link advertised capability*/ 1850 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 1851 &if_link->advertised_caps); 1852 /* Fill link supported capability*/ 1853 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 1854 &if_link->supported_caps); 1855 1856 if (link.link_up) 1857 if_link->speed = link.speed; 1858 1859 /* TODO - fill duplex properly */ 1860 if_link->duplex = DUPLEX_FULL; 1861 qed_mcp_get_media_type(hwfn, ptt, &media_type); 1862 if_link->port = qed_get_port_type(media_type); 1863 1864 if_link->autoneg = params.speed.autoneg; 1865 1866 if (params.pause.autoneg) 1867 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1868 if (params.pause.forced_rx) 1869 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1870 if (params.pause.forced_tx) 1871 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1872 1873 /* Link partner capabilities */ 1874 if (link.partner_adv_speed & 1875 QED_LINK_PARTNER_SPEED_1G_FD) 1876 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; 1877 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) 1878 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; 1879 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) 1880 if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; 1881 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) 1882 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; 1883 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) 1884 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; 1885 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) 1886 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; 1887 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) 1888 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; 1889 1890 if (link.an_complete) 1891 if_link->lp_caps |= QED_LM_Autoneg_BIT; 1892 1893 if (link.partner_adv_pause) 1894 if_link->lp_caps |= QED_LM_Pause_BIT; 1895 if 
(link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 1896 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 1897 if_link->lp_caps |= QED_LM_Asym_Pause_BIT; 1898 1899 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 1900 if_link->eee_supported = false; 1901 } else { 1902 if_link->eee_supported = true; 1903 if_link->eee_active = link.eee_active; 1904 if_link->sup_caps = link_caps.eee_speed_caps; 1905 /* MFW clears adv_caps on eee disable; use configured value */ 1906 if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : 1907 params.eee.adv_caps; 1908 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 1909 if_link->eee.enable = params.eee.enable; 1910 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 1911 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 1912 } 1913 } 1914 1915 static void qed_get_current_link(struct qed_dev *cdev, 1916 struct qed_link_output *if_link) 1917 { 1918 struct qed_hwfn *hwfn; 1919 struct qed_ptt *ptt; 1920 int i; 1921 1922 hwfn = &cdev->hwfns[0]; 1923 if (IS_PF(cdev)) { 1924 ptt = qed_ptt_acquire(hwfn); 1925 if (ptt) { 1926 qed_fill_link(hwfn, ptt, if_link); 1927 qed_ptt_release(hwfn, ptt); 1928 } else { 1929 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 1930 } 1931 } else { 1932 qed_fill_link(hwfn, NULL, if_link); 1933 } 1934 1935 for_each_hwfn(cdev, i) 1936 qed_inform_vf_link_state(&cdev->hwfns[i]); 1937 } 1938 1939 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 1940 { 1941 void *cookie = hwfn->cdev->ops_cookie; 1942 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 1943 struct qed_link_output if_link; 1944 1945 qed_fill_link(hwfn, ptt, &if_link); 1946 qed_inform_vf_link_state(hwfn); 1947 1948 if (IS_LEAD_HWFN(hwfn) && cookie) 1949 op->link_update(cookie, &if_link); 1950 } 1951 1952 static int qed_drain(struct qed_dev *cdev) 1953 { 1954 struct qed_hwfn *hwfn; 1955 struct qed_ptt *ptt; 1956 int i, rc; 1957 1958 if (IS_VF(cdev)) 1959 return 0; 1960 1961 for_each_hwfn(cdev, i) { 1962 hwfn = &cdev->hwfns[i]; 1963 ptt = qed_ptt_acquire(hwfn); 1964 if (!ptt) { 1965 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 1966 return -EBUSY; 1967 } 1968 rc = qed_mcp_drain(hwfn, ptt); 1969 qed_ptt_release(hwfn, ptt); 1970 if (rc) 1971 return rc; 1972 } 1973 1974 return 0; 1975 } 1976 1977 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 1978 struct qed_nvm_image_att *nvm_image, 1979 u32 *crc) 1980 { 1981 u8 *buf = NULL; 1982 int rc, j; 1983 u32 val; 1984 1985 /* Allocate a buffer for holding the nvram image */ 1986 buf = kzalloc(nvm_image->length, GFP_KERNEL); 1987 if (!buf) 1988 return -ENOMEM; 1989 1990 /* Read image into buffer */ 1991 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 1992 buf, nvm_image->length); 1993 if (rc) { 1994 DP_ERR(cdev, "Failed reading image from nvm\n"); 1995 goto out; 1996 } 1997 1998 /* Convert the buffer into big-endian format (excluding the 1999 * closing 4 bytes of CRC). 2000 */ 2001 for (j = 0; j < nvm_image->length - 4; j += 4) { 2002 val = cpu_to_be32(*(u32 *)&buf[j]); 2003 *(u32 *)&buf[j] = val; 2004 } 2005 2006 /* Calc CRC for the "actual" image buffer, i.e. not including 2007 * the last 4 CRC bytes. 
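	 * (The caller, qed_nvm_flash_image_access(), writes the result back
	 *  over those last 4 bytes when the CRC-update option is set.)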
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                         |
 * 4B  | image_type     | Options        |  Number of register settings    |
 * 8B  |                       Value                                        |
 * 12B |                       Mask                                         |
 * 16B |                       Offset                                       |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);

		/* Merge the requested bits into the current word, e.g. with
		 * cur_value 0x12345678, mask 0x0000ff00 and value 0x0000aa00,
		 * the word written back is 0x1234aa78.
		 */
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}
exit:
	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                         |
 * 4B  | b'0: check_response?   | b'1-31 reserved                          |
 * 8B  | File-type |                   reserved                            |
 * 12B |                    Image length in bytes                          |
 * \----------------------------------------------------------------------/
 *     Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                         |
 * 4B  |                       Length in bytes                             |
 * 8B  | b'0: check_response?   | b'1-31 reserved                          |
 * 12B |                       Offset in bytes                             |
 * 16B |                       Data ...                                    |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                           |
 * 4B  |                       Length in bytes                             |
 * 8B  | Highest command in this batchfile |          Reserved             |
 * \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x5 [command index]                         |
 * 4B  | Number of config attributes     |          Reserved               |
 * 4B  | Config ID                       | Entity ID      | Length         |
 * 4B  | Value                                                             |
 *     |                                                                   |
 * \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}

#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}

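/* Read a single NVM config attribute into the caller-provided buffer; when a
 * non-zero entity id is given, QED_NVM_CFG_GET_PF_FLAGS is used instead of
 * QED_NVM_CFG_GET_FLAGS.
 */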
static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

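/* Trigger the device recovery flow on the leading hwfn; the actual work is
 * carried out by qed_start_recovery_process().
 */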
static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}

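/* Selftest and common operation callbacks exported to the upper-layer
 * protocol drivers.
 */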
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

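/* Collect the generic MFW TLV data: feature flags and MAC addresses reported
 * by the protocol driver callback, plus aggregated vport traffic statistics.
 */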
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}