/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS	(8192)
#define QED_ROCE_DPIS	(8)

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);
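/* Note that qed itself does not register a PCI driver: protocol drivers
 * (e.g. qede) own the PCI device and drive qed through qed_probe() /
 * qed_remove() and the qed_common_ops exported at the end of this file,
 * so module init/exit only announce the module.
 */
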
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

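	/* A revision ID of 0xff typically means configuration-space reads
	 * return all-ones, i.e. the device is inaccessible (usually after an
	 * earlier error), so there is no point in continuing.
	 */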
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = cdev->mf_mode;
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

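			/* Like the MFW/MBI versions above, the flash size is
			 * queried from the management FW and therefore needs
			 * the PTT window acquired above.
			 */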
			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

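/* Fastpath "simd" handlers are spread across the hwfns: for a given absolute
 * index, index % num_hwfns selects the hwfn and index / num_hwfns selects the
 * per-hwfn handler slot. These handlers are invoked from qed_single_int()
 * when running without MSI-X.
 */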
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
				    hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

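/* Report to the protocol driver which fastpath interrupt resources it may
 * use. Valid only after qed_set_int_fp() has marked the fastpath as
 * configured; in INTA/MSI mode nothing is exposed since the single IRQ is
 * serviced entirely by qed.
 */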
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++;	/* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

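/* Decompress the zipped firmware init values into @unzip_buf using the zlib
 * stream pre-allocated per hwfn. Returns the unzipped size in dwords, or 0
 * on failure.
 */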
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, 192);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

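/* Translate the protocol driver's requested link overrides (autoneg,
 * advertised/forced speeds, pause, loopback, EEE) into the MFW link
 * configuration and ask the MFW to (re)set the link. VFs cannot touch the
 * physical link and are simply told to re-read the bulletin instead.
 */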
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

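/* Map the MFW-reported media type to the ethtool PORT_* identifier that is
 * reported back through the link output.
 */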
static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	int i;

	qed_fill_link(&cdev->hwfns[0], if_link);

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
	int rc;

	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

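/* The qed_update_* helpers below mirror driver state, MAC and MTU changes to
 * the management FW (qed_mcp_ov_*) so that it keeps an up-to-date view of the
 * function. They are no-ops for VFs.
 */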
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
};

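/* Collect the per-protocol statistics requested by the management FW. LAN
 * stats are derived from the aggregated vport statistics; FCoE and iSCSI
 * have their own collection helpers.
 */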
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
		    eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
		    eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}