/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS	(8192)
#define QED_ROCE_DPIS	(8)

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	if (IS_PF(cdev)) {
		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
		cdev->db_size = pci_resource_len(cdev->pdev, 2);
		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
		if (!cdev->doorbells) {
			DP_NOTICE(cdev, "Cannot map doorbell space\n");
			return -ENOMEM;
		}
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
				    QED_PCI_ETH_ROCE);
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = cdev->mf_mode;
		dev_info->tx_switching = true;

		if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
		    QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

"MSI-X" : "IRQ"); 589 590 return rc; 591 } 592 593 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 594 { 595 struct qed_dev *cdev = p_hwfn->cdev; 596 u8 id = p_hwfn->my_id; 597 u32 int_mode; 598 599 int_mode = cdev->int_params.out.int_mode; 600 if (int_mode == QED_INT_MODE_MSIX) 601 synchronize_irq(cdev->int_params.msix_table[id].vector); 602 else 603 synchronize_irq(cdev->pdev->irq); 604 } 605 606 static void qed_slowpath_irq_free(struct qed_dev *cdev) 607 { 608 int i; 609 610 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 611 for_each_hwfn(cdev, i) { 612 if (!cdev->hwfns[i].b_int_requested) 613 break; 614 synchronize_irq(cdev->int_params.msix_table[i].vector); 615 free_irq(cdev->int_params.msix_table[i].vector, 616 cdev->hwfns[i].sp_dpc); 617 } 618 } else { 619 if (QED_LEADING_HWFN(cdev)->b_int_requested) 620 free_irq(cdev->pdev->irq, cdev); 621 } 622 qed_int_disable_post_isr_release(cdev); 623 } 624 625 static int qed_nic_stop(struct qed_dev *cdev) 626 { 627 int i, rc; 628 629 rc = qed_hw_stop(cdev); 630 631 for (i = 0; i < cdev->num_hwfns; i++) { 632 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 633 634 if (p_hwfn->b_sp_dpc_enabled) { 635 tasklet_disable(p_hwfn->sp_dpc); 636 p_hwfn->b_sp_dpc_enabled = false; 637 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 638 "Disabled sp taskelt [hwfn %d] at %p\n", 639 i, p_hwfn->sp_dpc); 640 } 641 } 642 643 qed_dbg_pf_exit(cdev); 644 645 return rc; 646 } 647 648 static int qed_nic_setup(struct qed_dev *cdev) 649 { 650 int rc, i; 651 652 /* Determine if interface is going to require LL2 */ 653 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 654 for (i = 0; i < cdev->num_hwfns; i++) { 655 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 656 657 p_hwfn->using_ll2 = true; 658 } 659 } 660 661 rc = qed_resc_alloc(cdev); 662 if (rc) 663 return rc; 664 665 DP_INFO(cdev, "Allocated qed resources\n"); 666 667 qed_resc_setup(cdev); 668 669 return rc; 670 } 671 672 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 673 { 674 int limit = 0; 675 676 /* Mark the fastpath as free/used */ 677 cdev->int_params.fp_initialized = cnt ? true : false; 678 679 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 680 limit = cdev->num_hwfns * 63; 681 else if (cdev->int_params.fp_msix_cnt) 682 limit = cdev->int_params.fp_msix_cnt; 683 684 if (!limit) 685 return -ENOMEM; 686 687 return min_t(int, cnt, limit); 688 } 689 690 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 691 { 692 memset(info, 0, sizeof(struct qed_int_info)); 693 694 if (!cdev->int_params.fp_initialized) { 695 DP_INFO(cdev, 696 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 697 return -EINVAL; 698 } 699 700 /* Need to expose only MSI-X information; Single IRQ is handled solely 701 * by qed. 
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
	 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
	    QED_PCI_ETH_ROCE) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, 192);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_tunn_start_params tunn_info;
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (p_ptt) {
			QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
		} else {
			DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
			goto err;
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
			       1 << QED_MODE_L2GRE_TUNN |
			       1 << QED_MODE_IPGRE_TUNN |
			       1 << QED_MODE_L2GENEVE_TUNN |
			       1 << QED_MODE_IPGENEVE_TUNN;

	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);

	qed_iov_wq_stop(cdev, false);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);

		qed_nic_stop(cdev);
		qed_slowpath_irq_free(cdev);
	}

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (params.speed.autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
					    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
					   QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	int i;

	qed_fill_link(&cdev->hwfns[0], if_link);

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
	*rx_coal = cdev->rx_coalesce_usecs;
	*tx_coal = cdev->tx_coalesce_usecs;
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int hwfn_index;
	int status = 0;

	hwfn_index = qid % cdev->num_hwfns;
	hwfn = &cdev->hwfns[hwfn_index];
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
				      qid / cdev->num_hwfns, sb_id);
	if (status)
		goto out;
	status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
				      qid / cdev->num_hwfns, sb_id);
out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.get_coalesce = &qed_get_coalesce,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	default:
		DP_ERR(cdev, "Invalid protocol type = %d\n", type);
		return;
	}
}