1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/stddef.h> 34 #include <linux/pci.h> 35 #include <linux/kernel.h> 36 #include <linux/slab.h> 37 #include <linux/version.h> 38 #include <linux/delay.h> 39 #include <asm/byteorder.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/string.h> 42 #include <linux/module.h> 43 #include <linux/interrupt.h> 44 #include <linux/workqueue.h> 45 #include <linux/ethtool.h> 46 #include <linux/etherdevice.h> 47 #include <linux/vmalloc.h> 48 #include <linux/crash_dump.h> 49 #include <linux/qed/qed_if.h> 50 #include <linux/qed/qed_ll2_if.h> 51 52 #include "qed.h" 53 #include "qed_sriov.h" 54 #include "qed_sp.h" 55 #include "qed_dev_api.h" 56 #include "qed_ll2.h" 57 #include "qed_fcoe.h" 58 #include "qed_iscsi.h" 59 60 #include "qed_mcp.h" 61 #include "qed_hw.h" 62 #include "qed_selftest.h" 63 #include "qed_debug.h" 64 65 #define QED_ROCE_QPS (8192) 66 #define QED_ROCE_DPIS (8) 67 68 static char version[] = 69 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 70 71 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); 72 MODULE_LICENSE("GPL"); 73 MODULE_VERSION(DRV_MODULE_VERSION); 74 75 #define FW_FILE_VERSION \ 76 __stringify(FW_MAJOR_VERSION) "." \ 77 __stringify(FW_MINOR_VERSION) "." \ 78 __stringify(FW_REVISION_VERSION) "." \ 79 __stringify(FW_ENGINEERING_VERSION) 80 81 #define QED_FW_FILE_NAME \ 82 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" 83 84 MODULE_FIRMWARE(QED_FW_FILE_NAME); 85 86 static int __init qed_init(void) 87 { 88 pr_info("%s", version); 89 90 return 0; 91 } 92 93 static void __exit qed_cleanup(void) 94 { 95 pr_notice("qed_cleanup called\n"); 96 } 97 98 module_init(qed_init); 99 module_exit(qed_cleanup); 100 101 /* Check if the DMA controller on the machine can properly handle the DMA 102 * addressing required by the device. 
103 */ 104 static int qed_set_coherency_mask(struct qed_dev *cdev) 105 { 106 struct device *dev = &cdev->pdev->dev; 107 108 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 109 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 110 DP_NOTICE(cdev, 111 "Can't request 64-bit consistent allocations\n"); 112 return -EIO; 113 } 114 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 115 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); 116 return -EIO; 117 } 118 119 return 0; 120 } 121 122 static void qed_free_pci(struct qed_dev *cdev) 123 { 124 struct pci_dev *pdev = cdev->pdev; 125 126 if (cdev->doorbells) 127 iounmap(cdev->doorbells); 128 if (cdev->regview) 129 iounmap(cdev->regview); 130 if (atomic_read(&pdev->enable_cnt) == 1) 131 pci_release_regions(pdev); 132 133 pci_disable_device(pdev); 134 } 135 136 #define PCI_REVISION_ID_ERROR_VAL 0xff 137 138 /* Performs PCI initializations as well as initializing PCI-related parameters 139 * in the device structure. Returns 0 in case of success. 140 */ 141 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) 142 { 143 u8 rev_id; 144 int rc; 145 146 cdev->pdev = pdev; 147 148 rc = pci_enable_device(pdev); 149 if (rc) { 150 DP_NOTICE(cdev, "Cannot enable PCI device\n"); 151 goto err0; 152 } 153 154 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 155 DP_NOTICE(cdev, "No memory region found in bar #0\n"); 156 rc = -EIO; 157 goto err1; 158 } 159 160 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 161 DP_NOTICE(cdev, "No memory region found in bar #2\n"); 162 rc = -EIO; 163 goto err1; 164 } 165 166 if (atomic_read(&pdev->enable_cnt) == 1) { 167 rc = pci_request_regions(pdev, "qed"); 168 if (rc) { 169 DP_NOTICE(cdev, 170 "Failed to request PCI memory resources\n"); 171 goto err1; 172 } 173 pci_set_master(pdev); 174 pci_save_state(pdev); 175 } 176 177 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); 178 if (rev_id == PCI_REVISION_ID_ERROR_VAL) { 179 DP_NOTICE(cdev, 180 "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication.
Aborting.\n", 181 rev_id); 182 rc = -ENODEV; 183 goto err2; 184 } 185 if (!pci_is_pcie(pdev)) { 186 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 187 rc = -EIO; 188 goto err2; 189 } 190 191 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 192 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 193 DP_NOTICE(cdev, "Cannot find power management capability\n"); 194 195 rc = qed_set_coherency_mask(cdev); 196 if (rc) 197 goto err2; 198 199 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 200 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 201 cdev->pci_params.irq = pdev->irq; 202 203 cdev->regview = pci_ioremap_bar(pdev, 0); 204 if (!cdev->regview) { 205 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 206 rc = -ENOMEM; 207 goto err2; 208 } 209 210 if (IS_PF(cdev)) { 211 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 212 cdev->db_size = pci_resource_len(cdev->pdev, 2); 213 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 214 if (!cdev->doorbells) { 215 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 216 return -ENOMEM; 217 } 218 } 219 220 return 0; 221 222 err2: 223 pci_release_regions(pdev); 224 err1: 225 pci_disable_device(pdev); 226 err0: 227 return rc; 228 } 229 230 int qed_fill_dev_info(struct qed_dev *cdev, 231 struct qed_dev_info *dev_info) 232 { 233 struct qed_tunnel_info *tun = &cdev->tunnel; 234 struct qed_ptt *ptt; 235 236 memset(dev_info, 0, sizeof(struct qed_dev_info)); 237 238 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 239 tun->vxlan.b_mode_enabled) 240 dev_info->vxlan_enable = true; 241 242 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 243 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 244 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 245 dev_info->gre_enable = true; 246 247 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 248 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 249 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 250 dev_info->geneve_enable = true; 251 252 dev_info->num_hwfns = cdev->num_hwfns; 253 dev_info->pci_mem_start = cdev->pci_params.mem_start; 254 dev_info->pci_mem_end = cdev->pci_params.mem_end; 255 dev_info->pci_irq = cdev->pci_params.irq; 256 dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == 257 QED_PCI_ETH_ROCE); 258 dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); 259 dev_info->dev_type = cdev->type; 260 ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); 261 262 if (IS_PF(cdev)) { 263 dev_info->fw_major = FW_MAJOR_VERSION; 264 dev_info->fw_minor = FW_MINOR_VERSION; 265 dev_info->fw_rev = FW_REVISION_VERSION; 266 dev_info->fw_eng = FW_ENGINEERING_VERSION; 267 dev_info->mf_mode = cdev->mf_mode; 268 dev_info->tx_switching = true; 269 270 if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support == 271 QED_WOL_SUPPORT_PME) 272 dev_info->wol_support = true; 273 } else { 274 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, 275 &dev_info->fw_minor, &dev_info->fw_rev, 276 &dev_info->fw_eng); 277 } 278 279 if (IS_PF(cdev)) { 280 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 281 if (ptt) { 282 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 283 &dev_info->mfw_rev, NULL); 284 285 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 286 &dev_info->flash_size); 287 288 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 289 } 290 } else { 291 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 292 &dev_info->mfw_rev, NULL); 293 } 294 295 dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu; 296 297 return 0; 
298 } 299 300 static void qed_free_cdev(struct qed_dev *cdev) 301 { 302 kfree((void *)cdev); 303 } 304 305 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 306 { 307 struct qed_dev *cdev; 308 309 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 310 if (!cdev) 311 return cdev; 312 313 qed_init_struct(cdev); 314 315 return cdev; 316 } 317 318 /* Sets the requested power state */ 319 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 320 { 321 if (!cdev) 322 return -ENODEV; 323 324 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 325 return 0; 326 } 327 328 /* probing */ 329 static struct qed_dev *qed_probe(struct pci_dev *pdev, 330 struct qed_probe_params *params) 331 { 332 struct qed_dev *cdev; 333 int rc; 334 335 cdev = qed_alloc_cdev(pdev); 336 if (!cdev) 337 goto err0; 338 339 cdev->protocol = params->protocol; 340 341 if (params->is_vf) 342 cdev->b_is_vf = true; 343 344 qed_init_dp(cdev, params->dp_module, params->dp_level); 345 346 rc = qed_init_pci(cdev, pdev); 347 if (rc) { 348 DP_ERR(cdev, "init pci failed\n"); 349 goto err1; 350 } 351 DP_INFO(cdev, "PCI init completed successfully\n"); 352 353 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 354 if (rc) { 355 DP_ERR(cdev, "hw prepare failed\n"); 356 goto err2; 357 } 358 359 DP_INFO(cdev, "qed_probe completed successfully\n"); 360 361 return cdev; 362 363 err2: 364 qed_free_pci(cdev); 365 err1: 366 qed_free_cdev(cdev); 367 err0: 368 return NULL; 369 } 370 371 static void qed_remove(struct qed_dev *cdev) 372 { 373 if (!cdev) 374 return; 375 376 qed_hw_remove(cdev); 377 378 qed_free_pci(cdev); 379 380 qed_set_power_state(cdev, PCI_D3hot); 381 382 qed_free_cdev(cdev); 383 } 384 385 static void qed_disable_msix(struct qed_dev *cdev) 386 { 387 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 388 pci_disable_msix(cdev->pdev); 389 kfree(cdev->int_params.msix_table); 390 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 391 pci_disable_msi(cdev->pdev); 392 } 393 394 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 395 } 396 397 static int qed_enable_msix(struct qed_dev *cdev, 398 struct qed_int_params *int_params) 399 { 400 int i, rc, cnt; 401 402 cnt = int_params->in.num_vectors; 403 404 for (i = 0; i < cnt; i++) 405 int_params->msix_table[i].entry = i; 406 407 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 408 int_params->in.min_msix_cnt, cnt); 409 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 410 (rc % cdev->num_hwfns)) { 411 pci_disable_msix(cdev->pdev); 412 413 /* If fastpath is initialized, we need at least one interrupt 414 * per hwfn [and the slow path interrupts]. New requested number 415 * should be a multiple of the number of hwfns.
416 */ 417 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 418 DP_NOTICE(cdev, 419 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 420 cnt, int_params->in.num_vectors); 421 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 422 cnt); 423 if (!rc) 424 rc = cnt; 425 } 426 427 if (rc > 0) { 428 /* MSI-x configuration was achieved */ 429 int_params->out.int_mode = QED_INT_MODE_MSIX; 430 int_params->out.num_vectors = rc; 431 rc = 0; 432 } else { 433 DP_NOTICE(cdev, 434 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", 435 cnt, rc); 436 } 437 438 return rc; 439 } 440 441 /* This function outputs the int mode and the number of enabled msix vector */ 442 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) 443 { 444 struct qed_int_params *int_params = &cdev->int_params; 445 struct msix_entry *tbl; 446 int rc = 0, cnt; 447 448 switch (int_params->in.int_mode) { 449 case QED_INT_MODE_MSIX: 450 /* Allocate MSIX table */ 451 cnt = int_params->in.num_vectors; 452 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); 453 if (!int_params->msix_table) { 454 rc = -ENOMEM; 455 goto out; 456 } 457 458 /* Enable MSIX */ 459 rc = qed_enable_msix(cdev, int_params); 460 if (!rc) 461 goto out; 462 463 DP_NOTICE(cdev, "Failed to enable MSI-X\n"); 464 kfree(int_params->msix_table); 465 if (force_mode) 466 goto out; 467 /* Fallthrough */ 468 469 case QED_INT_MODE_MSI: 470 if (cdev->num_hwfns == 1) { 471 rc = pci_enable_msi(cdev->pdev); 472 if (!rc) { 473 int_params->out.int_mode = QED_INT_MODE_MSI; 474 goto out; 475 } 476 477 DP_NOTICE(cdev, "Failed to enable MSI\n"); 478 if (force_mode) 479 goto out; 480 } 481 /* Fallthrough */ 482 483 case QED_INT_MODE_INTA: 484 int_params->out.int_mode = QED_INT_MODE_INTA; 485 rc = 0; 486 goto out; 487 default: 488 DP_NOTICE(cdev, "Unknown int_mode value %d\n", 489 int_params->in.int_mode); 490 rc = -EINVAL; 491 } 492 493 out: 494 if (!rc) 495 DP_INFO(cdev, "Using %s interrupts\n", 496 int_params->out.int_mode == QED_INT_MODE_INTA ? 497 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
498 "MSI" : "MSIX"); 499 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 500 501 return rc; 502 } 503 504 static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 505 int index, void(*handler)(void *)) 506 { 507 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 508 int relative_idx = index / cdev->num_hwfns; 509 510 hwfn->simd_proto_handler[relative_idx].func = handler; 511 hwfn->simd_proto_handler[relative_idx].token = token; 512 } 513 514 static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 515 { 516 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 517 int relative_idx = index / cdev->num_hwfns; 518 519 memset(&hwfn->simd_proto_handler[relative_idx], 0, 520 sizeof(struct qed_simd_fp_handler)); 521 } 522 523 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 524 { 525 tasklet_schedule((struct tasklet_struct *)tasklet); 526 return IRQ_HANDLED; 527 } 528 529 static irqreturn_t qed_single_int(int irq, void *dev_instance) 530 { 531 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 532 struct qed_hwfn *hwfn; 533 irqreturn_t rc = IRQ_NONE; 534 u64 status; 535 int i, j; 536 537 for (i = 0; i < cdev->num_hwfns; i++) { 538 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 539 540 if (!status) 541 continue; 542 543 hwfn = &cdev->hwfns[i]; 544 545 /* Slowpath interrupt */ 546 if (unlikely(status & 0x1)) { 547 tasklet_schedule(hwfn->sp_dpc); 548 status &= ~0x1; 549 rc = IRQ_HANDLED; 550 } 551 552 /* Fastpath interrupts */ 553 for (j = 0; j < 64; j++) { 554 if ((0x2ULL << j) & status) { 555 hwfn->simd_proto_handler[j].func( 556 hwfn->simd_proto_handler[j].token); 557 status &= ~(0x2ULL << j); 558 rc = IRQ_HANDLED; 559 } 560 } 561 562 if (unlikely(status)) 563 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 564 "got an unknown interrupt status 0x%llx\n", 565 status); 566 } 567 568 return rc; 569 } 570 571 int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 572 { 573 struct qed_dev *cdev = hwfn->cdev; 574 u32 int_mode; 575 int rc = 0; 576 u8 id; 577 578 int_mode = cdev->int_params.out.int_mode; 579 if (int_mode == QED_INT_MODE_MSIX) { 580 id = hwfn->my_id; 581 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 582 id, cdev->pdev->bus->number, 583 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 584 rc = request_irq(cdev->int_params.msix_table[id].vector, 585 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); 586 } else { 587 unsigned long flags = 0; 588 589 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 590 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 591 PCI_FUNC(cdev->pdev->devfn)); 592 593 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 594 flags |= IRQF_SHARED; 595 596 rc = request_irq(cdev->pdev->irq, qed_single_int, 597 flags, cdev->name, cdev); 598 } 599 600 if (rc) 601 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 602 else 603 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 604 "Requested slowpath %s\n", 605 (int_mode == QED_INT_MODE_MSIX) ? 
"MSI-X" : "IRQ"); 606 607 return rc; 608 } 609 610 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 611 { 612 struct qed_dev *cdev = p_hwfn->cdev; 613 u8 id = p_hwfn->my_id; 614 u32 int_mode; 615 616 int_mode = cdev->int_params.out.int_mode; 617 if (int_mode == QED_INT_MODE_MSIX) 618 synchronize_irq(cdev->int_params.msix_table[id].vector); 619 else 620 synchronize_irq(cdev->pdev->irq); 621 } 622 623 static void qed_slowpath_irq_free(struct qed_dev *cdev) 624 { 625 int i; 626 627 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 628 for_each_hwfn(cdev, i) { 629 if (!cdev->hwfns[i].b_int_requested) 630 break; 631 synchronize_irq(cdev->int_params.msix_table[i].vector); 632 free_irq(cdev->int_params.msix_table[i].vector, 633 cdev->hwfns[i].sp_dpc); 634 } 635 } else { 636 if (QED_LEADING_HWFN(cdev)->b_int_requested) 637 free_irq(cdev->pdev->irq, cdev); 638 } 639 qed_int_disable_post_isr_release(cdev); 640 } 641 642 static int qed_nic_stop(struct qed_dev *cdev) 643 { 644 int i, rc; 645 646 rc = qed_hw_stop(cdev); 647 648 for (i = 0; i < cdev->num_hwfns; i++) { 649 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 650 651 if (p_hwfn->b_sp_dpc_enabled) { 652 tasklet_disable(p_hwfn->sp_dpc); 653 p_hwfn->b_sp_dpc_enabled = false; 654 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 655 "Disabled sp taskelt [hwfn %d] at %p\n", 656 i, p_hwfn->sp_dpc); 657 } 658 } 659 660 qed_dbg_pf_exit(cdev); 661 662 return rc; 663 } 664 665 static int qed_nic_setup(struct qed_dev *cdev) 666 { 667 int rc, i; 668 669 /* Determine if interface is going to require LL2 */ 670 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 671 for (i = 0; i < cdev->num_hwfns; i++) { 672 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 673 674 p_hwfn->using_ll2 = true; 675 } 676 } 677 678 rc = qed_resc_alloc(cdev); 679 if (rc) 680 return rc; 681 682 DP_INFO(cdev, "Allocated qed resources\n"); 683 684 qed_resc_setup(cdev); 685 686 return rc; 687 } 688 689 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 690 { 691 int limit = 0; 692 693 /* Mark the fastpath as free/used */ 694 cdev->int_params.fp_initialized = cnt ? true : false; 695 696 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 697 limit = cdev->num_hwfns * 63; 698 else if (cdev->int_params.fp_msix_cnt) 699 limit = cdev->int_params.fp_msix_cnt; 700 701 if (!limit) 702 return -ENOMEM; 703 704 return min_t(int, cnt, limit); 705 } 706 707 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 708 { 709 memset(info, 0, sizeof(struct qed_int_info)); 710 711 if (!cdev->int_params.fp_initialized) { 712 DP_INFO(cdev, 713 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 714 return -EINVAL; 715 } 716 717 /* Need to expose only MSI-X information; Single IRQ is handled solely 718 * by qed. 
719 */ 720 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 721 int msix_base = cdev->int_params.fp_msix_base; 722 723 info->msix_cnt = cdev->int_params.fp_msix_cnt; 724 info->msix = &cdev->int_params.msix_table[msix_base]; 725 } 726 727 return 0; 728 } 729 730 static int qed_slowpath_setup_int(struct qed_dev *cdev, 731 enum qed_int_mode int_mode) 732 { 733 struct qed_sb_cnt_info sb_cnt_info; 734 int num_l2_queues = 0; 735 int rc; 736 int i; 737 738 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 739 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 740 return -EINVAL; 741 } 742 743 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 744 cdev->int_params.in.int_mode = int_mode; 745 for_each_hwfn(cdev, i) { 746 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 747 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 748 cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; 749 cdev->int_params.in.num_vectors++; /* slowpath */ 750 } 751 752 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 753 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 754 755 rc = qed_set_int_mode(cdev, false); 756 if (rc) { 757 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 758 return rc; 759 } 760 761 cdev->int_params.fp_msix_base = cdev->num_hwfns; 762 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 763 cdev->num_hwfns; 764 765 if (!IS_ENABLED(CONFIG_QED_RDMA) || 766 QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE) 767 return 0; 768 769 for_each_hwfn(cdev, i) 770 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 771 772 DP_VERBOSE(cdev, QED_MSG_RDMA, 773 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 774 cdev->int_params.fp_msix_cnt, num_l2_queues); 775 776 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 777 cdev->int_params.rdma_msix_cnt = 778 (cdev->int_params.fp_msix_cnt - num_l2_queues) 779 / cdev->num_hwfns; 780 cdev->int_params.rdma_msix_base = 781 cdev->int_params.fp_msix_base + num_l2_queues; 782 cdev->int_params.fp_msix_cnt = num_l2_queues; 783 } else { 784 cdev->int_params.rdma_msix_cnt = 0; 785 } 786 787 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 788 cdev->int_params.rdma_msix_cnt, 789 cdev->int_params.rdma_msix_base); 790 791 return 0; 792 } 793 794 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 795 { 796 int rc; 797 798 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 799 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 800 801 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 802 &cdev->int_params.in.num_vectors); 803 if (cdev->num_hwfns > 1) { 804 u8 vectors = 0; 805 806 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 807 cdev->int_params.in.num_vectors += vectors; 808 } 809 810 /* We want a minimum of one fastpath vector per vf hwfn */ 811 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 812 813 rc = qed_set_int_mode(cdev, true); 814 if (rc) 815 return rc; 816 817 cdev->int_params.fp_msix_base = 0; 818 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 819 820 return 0; 821 } 822 823 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 824 u8 *input_buf, u32 max_size, u8 *unzip_buf) 825 { 826 int rc; 827 828 p_hwfn->stream->next_in = input_buf; 829 p_hwfn->stream->avail_in = input_len; 830 p_hwfn->stream->next_out = unzip_buf; 831 p_hwfn->stream->avail_out = max_size; 832 833 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 834 835 if (rc != Z_OK) { 836 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, 
rc = %d\n", 837 rc); 838 return 0; 839 } 840 841 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 842 zlib_inflateEnd(p_hwfn->stream); 843 844 if (rc != Z_OK && rc != Z_STREAM_END) { 845 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 846 p_hwfn->stream->msg, rc); 847 return 0; 848 } 849 850 return p_hwfn->stream->total_out / 4; 851 } 852 853 static int qed_alloc_stream_mem(struct qed_dev *cdev) 854 { 855 int i; 856 void *workspace; 857 858 for_each_hwfn(cdev, i) { 859 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 860 861 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 862 if (!p_hwfn->stream) 863 return -ENOMEM; 864 865 workspace = vzalloc(zlib_inflate_workspacesize()); 866 if (!workspace) 867 return -ENOMEM; 868 p_hwfn->stream->workspace = workspace; 869 } 870 871 return 0; 872 } 873 874 static void qed_free_stream_mem(struct qed_dev *cdev) 875 { 876 int i; 877 878 for_each_hwfn(cdev, i) { 879 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 880 881 if (!p_hwfn->stream) 882 return; 883 884 vfree(p_hwfn->stream->workspace); 885 kfree(p_hwfn->stream); 886 } 887 } 888 889 static void qed_update_pf_params(struct qed_dev *cdev, 890 struct qed_pf_params *params) 891 { 892 int i; 893 894 if (IS_ENABLED(CONFIG_QED_RDMA)) { 895 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 896 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 897 /* divide by 3 the MRs to avoid MF ILT overflow */ 898 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 899 } 900 901 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 902 params->eth_pf_params.num_arfs_filters = 0; 903 904 /* In case we might support RDMA, don't allow qede to be greedy 905 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. 906 */ 907 if (QED_LEADING_HWFN(cdev)->hw_info.personality == 908 QED_PCI_ETH_ROCE) { 909 u16 *num_cons; 910 911 num_cons = ¶ms->eth_pf_params.num_cons; 912 *num_cons = min_t(u16, *num_cons, 192); 913 } 914 915 for (i = 0; i < cdev->num_hwfns; i++) { 916 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 917 918 p_hwfn->pf_params = *params; 919 } 920 } 921 922 static int qed_slowpath_start(struct qed_dev *cdev, 923 struct qed_slowpath_params *params) 924 { 925 struct qed_drv_load_params drv_load_params; 926 struct qed_hw_init_params hw_init_params; 927 struct qed_mcp_drv_version drv_version; 928 struct qed_tunnel_info tunn_info; 929 const u8 *data = NULL; 930 struct qed_hwfn *hwfn; 931 #ifdef CONFIG_RFS_ACCEL 932 struct qed_ptt *p_ptt; 933 #endif 934 int rc = -EINVAL; 935 936 if (qed_iov_wq_start(cdev)) 937 goto err; 938 939 if (IS_PF(cdev)) { 940 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 941 &cdev->pdev->dev); 942 if (rc) { 943 DP_NOTICE(cdev, 944 "Failed to find fw file - /lib/firmware/%s\n", 945 QED_FW_FILE_NAME); 946 goto err; 947 } 948 949 #ifdef CONFIG_RFS_ACCEL 950 if (cdev->num_hwfns == 1) { 951 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 952 if (p_ptt) { 953 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 954 } else { 955 DP_NOTICE(cdev, 956 "Failed to acquire PTT for aRFS\n"); 957 goto err; 958 } 959 } 960 #endif 961 } 962 963 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 964 rc = qed_nic_setup(cdev); 965 if (rc) 966 goto err; 967 968 if (IS_PF(cdev)) 969 rc = qed_slowpath_setup_int(cdev, params->int_mode); 970 else 971 rc = qed_slowpath_vf_setup_int(cdev); 972 if (rc) 973 goto err1; 974 975 if (IS_PF(cdev)) { 976 /* Allocate stream for unzipping */ 977 rc = qed_alloc_stream_mem(cdev); 978 if (rc) 979 goto err2; 980 981 /* First Dword used to diffrentiate between various sources */ 
982 data = cdev->firmware->data + sizeof(u32); 983 984 qed_dbg_pf_init(cdev); 985 } 986 987 /* Start the slowpath */ 988 memset(&hw_init_params, 0, sizeof(hw_init_params)); 989 memset(&tunn_info, 0, sizeof(tunn_info)); 990 tunn_info.vxlan.b_mode_enabled = true; 991 tunn_info.l2_gre.b_mode_enabled = true; 992 tunn_info.ip_gre.b_mode_enabled = true; 993 tunn_info.l2_geneve.b_mode_enabled = true; 994 tunn_info.ip_geneve.b_mode_enabled = true; 995 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 996 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 997 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 998 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 999 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1000 hw_init_params.p_tunn = &tunn_info; 1001 hw_init_params.b_hw_start = true; 1002 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1003 hw_init_params.allow_npar_tx_switch = true; 1004 hw_init_params.bin_fw_data = data; 1005 1006 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1007 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1008 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1009 drv_load_params.avoid_eng_reset = false; 1010 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1011 hw_init_params.p_drv_load_params = &drv_load_params; 1012 1013 rc = qed_hw_init(cdev, &hw_init_params); 1014 if (rc) 1015 goto err2; 1016 1017 DP_INFO(cdev, 1018 "HW initialization and function start completed successfully\n"); 1019 1020 if (IS_PF(cdev)) { 1021 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1022 BIT(QED_MODE_L2GENEVE_TUNN) | 1023 BIT(QED_MODE_IPGENEVE_TUNN) | 1024 BIT(QED_MODE_L2GRE_TUNN) | 1025 BIT(QED_MODE_IPGRE_TUNN)); 1026 } 1027 1028 /* Allocate LL2 interface if needed */ 1029 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1030 rc = qed_ll2_alloc_if(cdev); 1031 if (rc) 1032 goto err3; 1033 } 1034 if (IS_PF(cdev)) { 1035 hwfn = QED_LEADING_HWFN(cdev); 1036 drv_version.version = (params->drv_major << 24) | 1037 (params->drv_minor << 16) | 1038 (params->drv_rev << 8) | 1039 (params->drv_eng); 1040 strlcpy(drv_version.name, params->name, 1041 MCP_DRV_VER_STR_SIZE - 4); 1042 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1043 &drv_version); 1044 if (rc) { 1045 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1046 return rc; 1047 } 1048 } 1049 1050 qed_reset_vport_stats(cdev); 1051 1052 return 0; 1053 1054 err3: 1055 qed_hw_stop(cdev); 1056 err2: 1057 qed_hw_timers_stop_all(cdev); 1058 if (IS_PF(cdev)) 1059 qed_slowpath_irq_free(cdev); 1060 qed_free_stream_mem(cdev); 1061 qed_disable_msix(cdev); 1062 err1: 1063 qed_resc_free(cdev); 1064 err: 1065 if (IS_PF(cdev)) 1066 release_firmware(cdev->firmware); 1067 1068 #ifdef CONFIG_RFS_ACCEL 1069 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1070 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1071 qed_ptt_release(QED_LEADING_HWFN(cdev), 1072 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1073 #endif 1074 1075 qed_iov_wq_stop(cdev, false); 1076 1077 return rc; 1078 } 1079 1080 static int qed_slowpath_stop(struct qed_dev *cdev) 1081 { 1082 if (!cdev) 1083 return -ENODEV; 1084 1085 qed_ll2_dealloc_if(cdev); 1086 1087 if (IS_PF(cdev)) { 1088 #ifdef CONFIG_RFS_ACCEL 1089 if (cdev->num_hwfns == 1) 1090 qed_ptt_release(QED_LEADING_HWFN(cdev), 1091 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1092 #endif 1093 qed_free_stream_mem(cdev); 1094 if (IS_QED_ETH_IF(cdev)) 1095 qed_sriov_disable(cdev, true); 1096 1097 qed_nic_stop(cdev); 1098 qed_slowpath_irq_free(cdev); 1099 } 1100 1101 qed_disable_msix(cdev); 
1102 1103 qed_resc_free(cdev); 1104 1105 qed_iov_wq_stop(cdev, true); 1106 1107 if (IS_PF(cdev)) 1108 release_firmware(cdev->firmware); 1109 1110 return 0; 1111 } 1112 1113 static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE], 1114 char ver_str[VER_SIZE]) 1115 { 1116 int i; 1117 1118 memcpy(cdev->name, name, NAME_SIZE); 1119 for_each_hwfn(cdev, i) 1120 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1121 1122 memcpy(cdev->ver_str, ver_str, VER_SIZE); 1123 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 1124 } 1125 1126 static u32 qed_sb_init(struct qed_dev *cdev, 1127 struct qed_sb_info *sb_info, 1128 void *sb_virt_addr, 1129 dma_addr_t sb_phy_addr, u16 sb_id, 1130 enum qed_sb_type type) 1131 { 1132 struct qed_hwfn *p_hwfn; 1133 struct qed_ptt *p_ptt; 1134 int hwfn_index; 1135 u16 rel_sb_id; 1136 u8 n_hwfns; 1137 u32 rc; 1138 1139 /* RoCE uses single engine and CMT uses two engines. When using both 1140 * we force only a single engine. Storage uses only engine 0 too. 1141 */ 1142 if (type == QED_SB_TYPE_L2_QUEUE) 1143 n_hwfns = cdev->num_hwfns; 1144 else 1145 n_hwfns = 1; 1146 1147 hwfn_index = sb_id % n_hwfns; 1148 p_hwfn = &cdev->hwfns[hwfn_index]; 1149 rel_sb_id = sb_id / n_hwfns; 1150 1151 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1152 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1153 hwfn_index, rel_sb_id, sb_id); 1154 1155 if (IS_PF(p_hwfn->cdev)) { 1156 p_ptt = qed_ptt_acquire(p_hwfn); 1157 if (!p_ptt) 1158 return -EBUSY; 1159 1160 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1161 sb_phy_addr, rel_sb_id); 1162 qed_ptt_release(p_hwfn, p_ptt); 1163 } else { 1164 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1165 sb_phy_addr, rel_sb_id); 1166 } 1167 1168 return rc; 1169 } 1170 1171 static u32 qed_sb_release(struct qed_dev *cdev, 1172 struct qed_sb_info *sb_info, u16 sb_id) 1173 { 1174 struct qed_hwfn *p_hwfn; 1175 int hwfn_index; 1176 u16 rel_sb_id; 1177 u32 rc; 1178 1179 hwfn_index = sb_id % cdev->num_hwfns; 1180 p_hwfn = &cdev->hwfns[hwfn_index]; 1181 rel_sb_id = sb_id / cdev->num_hwfns; 1182 1183 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1184 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1185 hwfn_index, rel_sb_id, sb_id); 1186 1187 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1188 1189 return rc; 1190 } 1191 1192 static bool qed_can_link_change(struct qed_dev *cdev) 1193 { 1194 return true; 1195 } 1196 1197 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) 1198 { 1199 struct qed_hwfn *hwfn; 1200 struct qed_mcp_link_params *link_params; 1201 struct qed_ptt *ptt; 1202 int rc; 1203 1204 if (!cdev) 1205 return -ENODEV; 1206 1207 /* The link should be set only once per PF */ 1208 hwfn = &cdev->hwfns[0]; 1209 1210 /* When VF wants to set link, force it to read the bulletin instead. 1211 * This mimics the PF behavior, where a notification [both immediate 1212 * and possible later] would be generated when changing properties.
1213 */ 1214 if (IS_VF(cdev)) { 1215 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1216 return 0; 1217 } 1218 1219 ptt = qed_ptt_acquire(hwfn); 1220 if (!ptt) 1221 return -EBUSY; 1222 1223 link_params = qed_mcp_get_link_params(hwfn); 1224 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1225 link_params->speed.autoneg = params->autoneg; 1226 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1227 link_params->speed.advertised_speeds = 0; 1228 if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) || 1229 (params->adv_speeds & QED_LM_1000baseT_Full_BIT)) 1230 link_params->speed.advertised_speeds |= 1231 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 1232 if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT) 1233 link_params->speed.advertised_speeds |= 1234 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 1235 if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT) 1236 link_params->speed.advertised_speeds |= 1237 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 1238 if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT) 1239 link_params->speed.advertised_speeds |= 1240 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 1241 if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT) 1242 link_params->speed.advertised_speeds |= 1243 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 1244 if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT) 1245 link_params->speed.advertised_speeds |= 1246 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 1247 } 1248 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1249 link_params->speed.forced_speed = params->forced_speed; 1250 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1251 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1252 link_params->pause.autoneg = true; 1253 else 1254 link_params->pause.autoneg = false; 1255 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1256 link_params->pause.forced_rx = true; 1257 else 1258 link_params->pause.forced_rx = false; 1259 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1260 link_params->pause.forced_tx = true; 1261 else 1262 link_params->pause.forced_tx = false; 1263 } 1264 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1265 switch (params->loopback_mode) { 1266 case QED_LINK_LOOPBACK_INT_PHY: 1267 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1268 break; 1269 case QED_LINK_LOOPBACK_EXT_PHY: 1270 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1271 break; 1272 case QED_LINK_LOOPBACK_EXT: 1273 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1274 break; 1275 case QED_LINK_LOOPBACK_MAC: 1276 link_params->loopback_mode = ETH_LOOPBACK_MAC; 1277 break; 1278 default: 1279 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1280 break; 1281 } 1282 } 1283 1284 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1285 1286 qed_ptt_release(hwfn, ptt); 1287 1288 return rc; 1289 } 1290 1291 static int qed_get_port_type(u32 media_type) 1292 { 1293 int port_type; 1294 1295 switch (media_type) { 1296 case MEDIA_SFPP_10G_FIBER: 1297 case MEDIA_SFP_1G_FIBER: 1298 case MEDIA_XFP_FIBER: 1299 case MEDIA_MODULE_FIBER: 1300 case MEDIA_KR: 1301 port_type = PORT_FIBRE; 1302 break; 1303 case MEDIA_DA_TWINAX: 1304 port_type = PORT_DA; 1305 break; 1306 case MEDIA_BASE_T: 1307 port_type = PORT_TP; 1308 break; 1309 case MEDIA_NOT_PRESENT: 1310 port_type = PORT_NONE; 1311 break; 1312 case MEDIA_UNSPECIFIED: 1313 default: 1314 port_type = PORT_OTHER; 1315 break; 1316 } 1317 return port_type; 1318 } 1319 1320 static int qed_get_link_data(struct qed_hwfn *hwfn, 
1321 struct qed_mcp_link_params *params, 1322 struct qed_mcp_link_state *link, 1323 struct qed_mcp_link_capabilities *link_caps) 1324 { 1325 void *p; 1326 1327 if (!IS_PF(hwfn->cdev)) { 1328 qed_vf_get_link_params(hwfn, params); 1329 qed_vf_get_link_state(hwfn, link); 1330 qed_vf_get_link_caps(hwfn, link_caps); 1331 1332 return 0; 1333 } 1334 1335 p = qed_mcp_get_link_params(hwfn); 1336 if (!p) 1337 return -ENXIO; 1338 memcpy(params, p, sizeof(*params)); 1339 1340 p = qed_mcp_get_link_state(hwfn); 1341 if (!p) 1342 return -ENXIO; 1343 memcpy(link, p, sizeof(*link)); 1344 1345 p = qed_mcp_get_link_capabilities(hwfn); 1346 if (!p) 1347 return -ENXIO; 1348 memcpy(link_caps, p, sizeof(*link_caps)); 1349 1350 return 0; 1351 } 1352 1353 static void qed_fill_link(struct qed_hwfn *hwfn, 1354 struct qed_link_output *if_link) 1355 { 1356 struct qed_mcp_link_params params; 1357 struct qed_mcp_link_state link; 1358 struct qed_mcp_link_capabilities link_caps; 1359 u32 media_type; 1360 1361 memset(if_link, 0, sizeof(*if_link)); 1362 1363 /* Prepare source inputs */ 1364 if (qed_get_link_data(hwfn, &params, &link, &link_caps)) { 1365 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 1366 return; 1367 } 1368 1369 /* Set the link parameters to pass to protocol driver */ 1370 if (link.link_up) 1371 if_link->link_up = true; 1372 1373 /* TODO - at the moment assume supported and advertised speed equal */ 1374 if_link->supported_caps = QED_LM_FIBRE_BIT; 1375 if (params.speed.autoneg) 1376 if_link->supported_caps |= QED_LM_Autoneg_BIT; 1377 if (params.pause.autoneg || 1378 (params.pause.forced_rx && params.pause.forced_tx)) 1379 if_link->supported_caps |= QED_LM_Asym_Pause_BIT; 1380 if (params.pause.autoneg || params.pause.forced_rx || 1381 params.pause.forced_tx) 1382 if_link->supported_caps |= QED_LM_Pause_BIT; 1383 1384 if_link->advertised_caps = if_link->supported_caps; 1385 if (params.speed.advertised_speeds & 1386 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1387 if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT | 1388 QED_LM_1000baseT_Full_BIT; 1389 if (params.speed.advertised_speeds & 1390 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1391 if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT; 1392 if (params.speed.advertised_speeds & 1393 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1394 if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT; 1395 if (params.speed.advertised_speeds & 1396 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1397 if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT; 1398 if (params.speed.advertised_speeds & 1399 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1400 if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT; 1401 if (params.speed.advertised_speeds & 1402 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1403 if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT; 1404 1405 if (link_caps.speed_capabilities & 1406 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1407 if_link->supported_caps |= QED_LM_1000baseT_Half_BIT | 1408 QED_LM_1000baseT_Full_BIT; 1409 if (link_caps.speed_capabilities & 1410 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1411 if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT; 1412 if (link_caps.speed_capabilities & 1413 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1414 if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT; 1415 if (link_caps.speed_capabilities & 1416 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1417 if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT; 1418 if (link_caps.speed_capabilities & 1419
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1420 if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT; 1421 if (link_caps.speed_capabilities & 1422 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1423 if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT; 1424 1425 if (link.link_up) 1426 if_link->speed = link.speed; 1427 1428 /* TODO - fill duplex properly */ 1429 if_link->duplex = DUPLEX_FULL; 1430 qed_mcp_get_media_type(hwfn->cdev, &media_type); 1431 if_link->port = qed_get_port_type(media_type); 1432 1433 if_link->autoneg = params.speed.autoneg; 1434 1435 if (params.pause.autoneg) 1436 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1437 if (params.pause.forced_rx) 1438 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1439 if (params.pause.forced_tx) 1440 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1441 1442 /* Link partner capabilities */ 1443 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD) 1444 if_link->lp_caps |= QED_LM_1000baseT_Half_BIT; 1445 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD) 1446 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; 1447 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) 1448 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; 1449 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) 1450 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; 1451 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) 1452 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; 1453 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) 1454 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; 1455 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) 1456 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; 1457 1458 if (link.an_complete) 1459 if_link->lp_caps |= QED_LM_Autoneg_BIT; 1460 1461 if (link.partner_adv_pause) 1462 if_link->lp_caps |= QED_LM_Pause_BIT; 1463 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 1464 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 1465 if_link->lp_caps |= QED_LM_Asym_Pause_BIT; 1466 } 1467 1468 static void qed_get_current_link(struct qed_dev *cdev, 1469 struct qed_link_output *if_link) 1470 { 1471 int i; 1472 1473 qed_fill_link(&cdev->hwfns[0], if_link); 1474 1475 for_each_hwfn(cdev, i) 1476 qed_inform_vf_link_state(&cdev->hwfns[i]); 1477 } 1478 1479 void qed_link_update(struct qed_hwfn *hwfn) 1480 { 1481 void *cookie = hwfn->cdev->ops_cookie; 1482 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 1483 struct qed_link_output if_link; 1484 1485 qed_fill_link(hwfn, &if_link); 1486 qed_inform_vf_link_state(hwfn); 1487 1488 if (IS_LEAD_HWFN(hwfn) && cookie) 1489 op->link_update(cookie, &if_link); 1490 } 1491 1492 static int qed_drain(struct qed_dev *cdev) 1493 { 1494 struct qed_hwfn *hwfn; 1495 struct qed_ptt *ptt; 1496 int i, rc; 1497 1498 if (IS_VF(cdev)) 1499 return 0; 1500 1501 for_each_hwfn(cdev, i) { 1502 hwfn = &cdev->hwfns[i]; 1503 ptt = qed_ptt_acquire(hwfn); 1504 if (!ptt) { 1505 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 1506 return -EBUSY; 1507 } 1508 rc = qed_mcp_drain(hwfn, ptt); 1509 if (rc) 1510 return rc; 1511 qed_ptt_release(hwfn, ptt); 1512 } 1513 1514 return 0; 1515 } 1516 1517 static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) 1518 { 1519 *rx_coal = cdev->rx_coalesce_usecs; 1520 *tx_coal = cdev->tx_coalesce_usecs; 1521 } 1522 1523 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 1524 u8 qid, u16 sb_id) 1525 { 1526 struct qed_hwfn *hwfn; 1527 struct qed_ptt *ptt; 1528 int 
hwfn_index; 1529 int status = 0; 1530 1531 hwfn_index = qid % cdev->num_hwfns; 1532 hwfn = &cdev->hwfns[hwfn_index]; 1533 ptt = qed_ptt_acquire(hwfn); 1534 if (!ptt) 1535 return -EAGAIN; 1536 1537 status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, 1538 qid / cdev->num_hwfns, sb_id); 1539 if (status) 1540 goto out; 1541 status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, 1542 qid / cdev->num_hwfns, sb_id); 1543 out: 1544 qed_ptt_release(hwfn, ptt); 1545 1546 return status; 1547 } 1548 1549 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 1550 { 1551 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 1552 struct qed_ptt *ptt; 1553 int status = 0; 1554 1555 ptt = qed_ptt_acquire(hwfn); 1556 if (!ptt) 1557 return -EAGAIN; 1558 1559 status = qed_mcp_set_led(hwfn, ptt, mode); 1560 1561 qed_ptt_release(hwfn, ptt); 1562 1563 return status; 1564 } 1565 1566 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 1567 { 1568 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 1569 struct qed_ptt *ptt; 1570 int rc = 0; 1571 1572 if (IS_VF(cdev)) 1573 return 0; 1574 1575 ptt = qed_ptt_acquire(hwfn); 1576 if (!ptt) 1577 return -EAGAIN; 1578 1579 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 1580 : QED_OV_WOL_DISABLED); 1581 if (rc) 1582 goto out; 1583 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 1584 1585 out: 1586 qed_ptt_release(hwfn, ptt); 1587 return rc; 1588 } 1589 1590 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 1591 { 1592 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 1593 struct qed_ptt *ptt; 1594 int status = 0; 1595 1596 if (IS_VF(cdev)) 1597 return 0; 1598 1599 ptt = qed_ptt_acquire(hwfn); 1600 if (!ptt) 1601 return -EAGAIN; 1602 1603 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
1604 QED_OV_DRIVER_STATE_ACTIVE : 1605 QED_OV_DRIVER_STATE_DISABLED); 1606 1607 qed_ptt_release(hwfn, ptt); 1608 1609 return status; 1610 } 1611 1612 static int qed_update_mac(struct qed_dev *cdev, u8 *mac) 1613 { 1614 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 1615 struct qed_ptt *ptt; 1616 int status = 0; 1617 1618 if (IS_VF(cdev)) 1619 return 0; 1620 1621 ptt = qed_ptt_acquire(hwfn); 1622 if (!ptt) 1623 return -EAGAIN; 1624 1625 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 1626 if (status) 1627 goto out; 1628 1629 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 1630 1631 out: 1632 qed_ptt_release(hwfn, ptt); 1633 return status; 1634 } 1635 1636 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 1637 { 1638 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 1639 struct qed_ptt *ptt; 1640 int status = 0; 1641 1642 if (IS_VF(cdev)) 1643 return 0; 1644 1645 ptt = qed_ptt_acquire(hwfn); 1646 if (!ptt) 1647 return -EAGAIN; 1648 1649 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 1650 if (status) 1651 goto out; 1652 1653 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 1654 1655 out: 1656 qed_ptt_release(hwfn, ptt); 1657 return status; 1658 } 1659 1660 static struct qed_selftest_ops qed_selftest_ops_pass = { 1661 .selftest_memory = &qed_selftest_memory, 1662 .selftest_interrupt = &qed_selftest_interrupt, 1663 .selftest_register = &qed_selftest_register, 1664 .selftest_clock = &qed_selftest_clock, 1665 .selftest_nvram = &qed_selftest_nvram, 1666 }; 1667 1668 const struct qed_common_ops qed_common_ops_pass = { 1669 .selftest = &qed_selftest_ops_pass, 1670 .probe = &qed_probe, 1671 .remove = &qed_remove, 1672 .set_power_state = &qed_set_power_state, 1673 .set_id = &qed_set_id, 1674 .update_pf_params = &qed_update_pf_params, 1675 .slowpath_start = &qed_slowpath_start, 1676 .slowpath_stop = &qed_slowpath_stop, 1677 .set_fp_int = &qed_set_int_fp, 1678 .get_fp_int = &qed_get_int_fp, 1679 .sb_init = &qed_sb_init, 1680 .sb_release = &qed_sb_release, 1681 .simd_handler_config = &qed_simd_handler_config, 1682 .simd_handler_clean = &qed_simd_handler_clean, 1683 .dbg_grc = &qed_dbg_grc, 1684 .dbg_grc_size = &qed_dbg_grc_size, 1685 .can_link_change = &qed_can_link_change, 1686 .set_link = &qed_set_link, 1687 .get_link = &qed_get_current_link, 1688 .drain = &qed_drain, 1689 .update_msglvl = &qed_init_dp, 1690 .dbg_all_data = &qed_dbg_all_data, 1691 .dbg_all_data_size = &qed_dbg_all_data_size, 1692 .chain_alloc = &qed_chain_alloc, 1693 .chain_free = &qed_chain_free, 1694 .get_coalesce = &qed_get_coalesce, 1695 .set_coalesce = &qed_set_coalesce, 1696 .set_led = &qed_set_led, 1697 .update_drv_state = &qed_update_drv_state, 1698 .update_mac = &qed_update_mac, 1699 .update_mtu = &qed_update_mtu, 1700 .update_wol = &qed_update_wol, 1701 }; 1702 1703 void qed_get_protocol_stats(struct qed_dev *cdev, 1704 enum qed_mcp_protocol_type type, 1705 union qed_mcp_protocol_stats *stats) 1706 { 1707 struct qed_eth_stats eth_stats; 1708 1709 memset(stats, 0, sizeof(*stats)); 1710 1711 switch (type) { 1712 case QED_MCP_LAN_STATS: 1713 qed_get_vport_stats(cdev, &eth_stats); 1714 stats->lan_stats.ucast_rx_pkts = 1715 eth_stats.common.rx_ucast_pkts; 1716 stats->lan_stats.ucast_tx_pkts = 1717 eth_stats.common.tx_ucast_pkts; 1718 stats->lan_stats.fcs_err = -1; 1719 break; 1720 case QED_MCP_FCOE_STATS: 1721 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); 1722 break; 1723 case QED_MCP_ISCSI_STATS: 1724 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 1725 break;
1726 default: 1727 DP_ERR(cdev, "Invalid protocol type = %d\n", type); 1728 return; 1729 } 1730 } 1731