/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS	(8192)
#define QED_ROCE_DPIS	(8)

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	if (IS_PF(cdev)) {
		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
		cdev->db_size = pci_resource_len(cdev->pdev, 2);
		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
		if (!cdev->doorbells) {
			DP_NOTICE(cdev, "Cannot map doorbell space\n");
			return -ENOMEM;
		}
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
				    QED_PCI_ETH_ROCE);
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = cdev->mf_mode;
		dev_info->tx_switching = true;

		if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
		    QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

/* probing */
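/* Allocate and initialize a qed_dev for this PCI function: record the
 * requested protocol and debug verbosity, map the PCI BARs via qed_init_pci()
 * and run the early HW preparation. On any failure the partially initialized
 * device is torn down and NULL is returned to the caller.
 */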
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_reset(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_reset(cdev);
	if (rc)
		return rc;

	qed_resc_free(cdev);

	return 0;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++;	/* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
	 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
	    QED_PCI_ETH_ROCE) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, 192);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_tunn_start_params tunn_info;
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (p_ptt) {
			QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
		} else {
			DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
			goto err;
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.tunn_mode |=	1 << QED_MODE_VXLAN_TUNN |
				1 << QED_MODE_L2GRE_TUNN |
				1 << QED_MODE_IPGRE_TUNN |
				1 << QED_MODE_L2GENEVE_TUNN |
				1 << QED_MODE_IPGENEVE_TUNN;

	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;

	/* Start the slowpath */
	rc = qed_hw_init(cdev, &tunn_info, true,
			 cdev->int_params.out.int_mode,
			 true, data);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);

	qed_iov_wq_stop(cdev, false);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);

		qed_nic_stop(cdev);
		qed_slowpath_irq_free(cdev);
	}

	qed_disable_msix(cdev);
	qed_nic_reset(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

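/* Gather the current link params, state and capabilities, either directly
 * from the MFW-maintained structures (PF path) or via the VF query helpers,
 * and copy them into the caller-supplied buffers.
 */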
static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (params.speed.autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	int i;

	qed_fill_link(&cdev->hwfns[0], if_link);

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
	*rx_coal = cdev->rx_coalesce_usecs;
	*tx_coal = cdev->tx_coalesce_usecs;
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int hwfn_index;
	int status = 0;

	hwfn_index = qid % cdev->num_hwfns;
	hwfn = &cdev->hwfns[hwfn_index];
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
				      qid / cdev->num_hwfns, sb_id);
	if (status)
		goto out;
	status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
				      qid / cdev->num_hwfns, sb_id);
out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.get_coalesce = &qed_get_coalesce,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	default:
		DP_ERR(cdev, "Invalid protocol type = %d\n", type);
		return;
	}
}