1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/stddef.h> 34 #include <linux/pci.h> 35 #include <linux/kernel.h> 36 #include <linux/slab.h> 37 #include <linux/delay.h> 38 #include <asm/byteorder.h> 39 #include <linux/dma-mapping.h> 40 #include <linux/string.h> 41 #include <linux/module.h> 42 #include <linux/interrupt.h> 43 #include <linux/workqueue.h> 44 #include <linux/ethtool.h> 45 #include <linux/etherdevice.h> 46 #include <linux/vmalloc.h> 47 #include <linux/crash_dump.h> 48 #include <linux/crc32.h> 49 #include <linux/qed/qed_if.h> 50 #include <linux/qed/qed_ll2_if.h> 51 52 #include "qed.h" 53 #include "qed_sriov.h" 54 #include "qed_sp.h" 55 #include "qed_dev_api.h" 56 #include "qed_ll2.h" 57 #include "qed_fcoe.h" 58 #include "qed_iscsi.h" 59 60 #include "qed_mcp.h" 61 #include "qed_reg_addr.h" 62 #include "qed_hw.h" 63 #include "qed_selftest.h" 64 #include "qed_debug.h" 65 66 #define QED_ROCE_QPS (8192) 67 #define QED_ROCE_DPIS (8) 68 #define QED_RDMA_SRQS QED_ROCE_QPS 69 70 static char version[] = 71 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 72 73 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); 74 MODULE_LICENSE("GPL"); 75 MODULE_VERSION(DRV_MODULE_VERSION); 76 77 #define FW_FILE_VERSION \ 78 __stringify(FW_MAJOR_VERSION) "." \ 79 __stringify(FW_MINOR_VERSION) "." \ 80 __stringify(FW_REVISION_VERSION) "." \ 81 __stringify(FW_ENGINEERING_VERSION) 82 83 #define QED_FW_FILE_NAME \ 84 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" 85 86 MODULE_FIRMWARE(QED_FW_FILE_NAME); 87 88 static int __init qed_init(void) 89 { 90 pr_info("%s", version); 91 92 return 0; 93 } 94 95 static void __exit qed_cleanup(void) 96 { 97 pr_notice("qed_cleanup called\n"); 98 } 99 100 module_init(qed_init); 101 module_exit(qed_cleanup); 102 103 /* Check if the DMA controller on the machine can properly handle the DMA 104 * addressing required by the device. 105 */ 106 static int qed_set_coherency_mask(struct qed_dev *cdev) 107 { 108 struct device *dev = &cdev->pdev->dev; 109 110 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 111 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 112 DP_NOTICE(cdev, 113 "Can't request 64-bit consistent allocations\n"); 114 return -EIO; 115 } 116 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 117 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); 118 return -EIO; 119 } 120 121 return 0; 122 } 123 124 static void qed_free_pci(struct qed_dev *cdev) 125 { 126 struct pci_dev *pdev = cdev->pdev; 127 128 if (cdev->doorbells && cdev->db_size) 129 iounmap(cdev->doorbells); 130 if (cdev->regview) 131 iounmap(cdev->regview); 132 if (atomic_read(&pdev->enable_cnt) == 1) 133 pci_release_regions(pdev); 134 135 pci_disable_device(pdev); 136 } 137 138 #define PCI_REVISION_ID_ERROR_VAL 0xff 139 140 /* Performs PCI initializations as well as initializing PCI-related parameters 141 * in the device structrue. Returns 0 in case of success. 142 */ 143 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) 144 { 145 u8 rev_id; 146 int rc; 147 148 cdev->pdev = pdev; 149 150 rc = pci_enable_device(pdev); 151 if (rc) { 152 DP_NOTICE(cdev, "Cannot enable PCI device\n"); 153 goto err0; 154 } 155 156 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 157 DP_NOTICE(cdev, "No memory region found in bar #0\n"); 158 rc = -EIO; 159 goto err1; 160 } 161 162 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 163 DP_NOTICE(cdev, "No memory region found in bar #2\n"); 164 rc = -EIO; 165 goto err1; 166 } 167 168 if (atomic_read(&pdev->enable_cnt) == 1) { 169 rc = pci_request_regions(pdev, "qed"); 170 if (rc) { 171 DP_NOTICE(cdev, 172 "Failed to request PCI memory resources\n"); 173 goto err1; 174 } 175 pci_set_master(pdev); 176 pci_save_state(pdev); 177 } 178 179 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); 180 if (rev_id == PCI_REVISION_ID_ERROR_VAL) { 181 DP_NOTICE(cdev, 182 "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n", 183 rev_id); 184 rc = -ENODEV; 185 goto err2; 186 } 187 if (!pci_is_pcie(pdev)) { 188 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 189 rc = -EIO; 190 goto err2; 191 } 192 193 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 194 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 195 DP_NOTICE(cdev, "Cannot find power management capability\n"); 196 197 rc = qed_set_coherency_mask(cdev); 198 if (rc) 199 goto err2; 200 201 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 202 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 203 cdev->pci_params.irq = pdev->irq; 204 205 cdev->regview = pci_ioremap_bar(pdev, 0); 206 if (!cdev->regview) { 207 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 208 rc = -ENOMEM; 209 goto err2; 210 } 211 212 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 213 cdev->db_size = pci_resource_len(cdev->pdev, 2); 214 if (!cdev->db_size) { 215 if (IS_PF(cdev)) { 216 DP_NOTICE(cdev, "No Doorbell bar available\n"); 217 return -EINVAL; 218 } else { 219 return 0; 220 } 221 } 222 223 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 224 225 if (!cdev->doorbells) { 226 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 227 return -ENOMEM; 228 } 229 230 return 0; 231 232 err2: 233 pci_release_regions(pdev); 234 err1: 235 pci_disable_device(pdev); 236 err0: 237 return rc; 238 } 239 240 int qed_fill_dev_info(struct qed_dev *cdev, 241 struct qed_dev_info *dev_info) 242 { 243 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 244 struct qed_hw_info *hw_info = &p_hwfn->hw_info; 245 struct qed_tunnel_info *tun = &cdev->tunnel; 246 struct qed_ptt *ptt; 247 248 memset(dev_info, 0, sizeof(struct qed_dev_info)); 249 250 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 251 tun->vxlan.b_mode_enabled) 252 dev_info->vxlan_enable = true; 253 254 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 255 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 256 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 257 dev_info->gre_enable = true; 258 259 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 260 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 261 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 262 dev_info->geneve_enable = true; 263 264 dev_info->num_hwfns = cdev->num_hwfns; 265 dev_info->pci_mem_start = cdev->pci_params.mem_start; 266 dev_info->pci_mem_end = cdev->pci_params.mem_end; 267 dev_info->pci_irq = cdev->pci_params.irq; 268 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 269 dev_info->dev_type = cdev->type; 270 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 271 272 if (IS_PF(cdev)) { 273 dev_info->fw_major = FW_MAJOR_VERSION; 274 dev_info->fw_minor = FW_MINOR_VERSION; 275 dev_info->fw_rev = FW_REVISION_VERSION; 276 dev_info->fw_eng = FW_ENGINEERING_VERSION; 277 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 278 &cdev->mf_bits); 279 dev_info->tx_switching = true; 280 281 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) 282 dev_info->wol_support = true; 283 284 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; 285 } else { 286 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, 287 &dev_info->fw_minor, &dev_info->fw_rev, 288 &dev_info->fw_eng); 289 } 290 291 if (IS_PF(cdev)) { 292 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 293 if (ptt) { 294 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 295 &dev_info->mfw_rev, NULL); 296 297 qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 298 &dev_info->mbi_version); 299 300 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 301 &dev_info->flash_size); 302 303 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 304 } 305 } else { 306 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 307 &dev_info->mfw_rev, NULL); 308 } 309 310 dev_info->mtu = hw_info->mtu; 311 312 return 0; 313 } 314 315 static void qed_free_cdev(struct qed_dev *cdev) 316 { 317 kfree((void *)cdev); 318 } 319 320 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 321 { 322 struct qed_dev *cdev; 323 324 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 325 if (!cdev) 326 return cdev; 327 328 qed_init_struct(cdev); 329 330 return cdev; 331 } 332 333 /* Sets the requested power state */ 334 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 335 { 336 if (!cdev) 337 return -ENODEV; 338 339 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 340 return 0; 341 } 342 343 /* probing */ 344 static struct qed_dev *qed_probe(struct pci_dev *pdev, 345 struct qed_probe_params *params) 346 { 347 struct qed_dev *cdev; 348 int rc; 349 350 cdev = qed_alloc_cdev(pdev); 351 if (!cdev) 352 goto err0; 353 354 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 355 cdev->protocol = params->protocol; 356 357 if (params->is_vf) 358 cdev->b_is_vf = true; 359 360 qed_init_dp(cdev, params->dp_module, params->dp_level); 361 362 rc = qed_init_pci(cdev, pdev); 363 if (rc) { 364 DP_ERR(cdev, "init pci failed\n"); 365 goto err1; 366 } 367 DP_INFO(cdev, "PCI init completed successfully\n"); 368 369 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 370 if (rc) { 371 DP_ERR(cdev, "hw prepare failed\n"); 372 goto err2; 373 } 374 375 DP_INFO(cdev, "qed_probe completed successfully\n"); 376 377 return cdev; 378 379 err2: 380 qed_free_pci(cdev); 381 err1: 382 qed_free_cdev(cdev); 383 err0: 384 return NULL; 385 } 386 387 static void qed_remove(struct qed_dev *cdev) 388 { 389 if (!cdev) 390 return; 391 392 qed_hw_remove(cdev); 393 394 qed_free_pci(cdev); 395 396 qed_set_power_state(cdev, PCI_D3hot); 397 398 qed_free_cdev(cdev); 399 } 400 401 static void qed_disable_msix(struct qed_dev *cdev) 402 { 403 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 404 pci_disable_msix(cdev->pdev); 405 kfree(cdev->int_params.msix_table); 406 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 407 pci_disable_msi(cdev->pdev); 408 } 409 410 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 411 } 412 413 static int qed_enable_msix(struct qed_dev *cdev, 414 struct qed_int_params *int_params) 415 { 416 int i, rc, cnt; 417 418 cnt = int_params->in.num_vectors; 419 420 for (i = 0; i < cnt; i++) 421 int_params->msix_table[i].entry = i; 422 423 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 424 int_params->in.min_msix_cnt, cnt); 425 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 426 (rc % cdev->num_hwfns)) { 427 pci_disable_msix(cdev->pdev); 428 429 /* If fastpath is initialized, we need at least one interrupt 430 * per hwfn [and the slow path interrupts]. New requested number 431 * should be a multiple of the number of hwfns. 432 */ 433 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 434 DP_NOTICE(cdev, 435 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 436 cnt, int_params->in.num_vectors); 437 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 438 cnt); 439 if (!rc) 440 rc = cnt; 441 } 442 443 if (rc > 0) { 444 /* MSI-x configuration was achieved */ 445 int_params->out.int_mode = QED_INT_MODE_MSIX; 446 int_params->out.num_vectors = rc; 447 rc = 0; 448 } else { 449 DP_NOTICE(cdev, 450 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", 451 cnt, rc); 452 } 453 454 return rc; 455 } 456 457 /* This function outputs the int mode and the number of enabled msix vector */ 458 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) 459 { 460 struct qed_int_params *int_params = &cdev->int_params; 461 struct msix_entry *tbl; 462 int rc = 0, cnt; 463 464 switch (int_params->in.int_mode) { 465 case QED_INT_MODE_MSIX: 466 /* Allocate MSIX table */ 467 cnt = int_params->in.num_vectors; 468 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); 469 if (!int_params->msix_table) { 470 rc = -ENOMEM; 471 goto out; 472 } 473 474 /* Enable MSIX */ 475 rc = qed_enable_msix(cdev, int_params); 476 if (!rc) 477 goto out; 478 479 DP_NOTICE(cdev, "Failed to enable MSI-X\n"); 480 kfree(int_params->msix_table); 481 if (force_mode) 482 goto out; 483 /* Fallthrough */ 484 485 case QED_INT_MODE_MSI: 486 if (cdev->num_hwfns == 1) { 487 rc = pci_enable_msi(cdev->pdev); 488 if (!rc) { 489 int_params->out.int_mode = QED_INT_MODE_MSI; 490 goto out; 491 } 492 493 DP_NOTICE(cdev, "Failed to enable MSI\n"); 494 if (force_mode) 495 goto out; 496 } 497 /* Fallthrough */ 498 499 case QED_INT_MODE_INTA: 500 int_params->out.int_mode = QED_INT_MODE_INTA; 501 rc = 0; 502 goto out; 503 default: 504 DP_NOTICE(cdev, "Unknown int_mode value %d\n", 505 int_params->in.int_mode); 506 rc = -EINVAL; 507 } 508 509 out: 510 if (!rc) 511 DP_INFO(cdev, "Using %s interrupts\n", 512 int_params->out.int_mode == QED_INT_MODE_INTA ? 513 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 514 "MSI" : "MSIX"); 515 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 516 517 return rc; 518 } 519 520 static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 521 int index, void(*handler)(void *)) 522 { 523 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 524 int relative_idx = index / cdev->num_hwfns; 525 526 hwfn->simd_proto_handler[relative_idx].func = handler; 527 hwfn->simd_proto_handler[relative_idx].token = token; 528 } 529 530 static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 531 { 532 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 533 int relative_idx = index / cdev->num_hwfns; 534 535 memset(&hwfn->simd_proto_handler[relative_idx], 0, 536 sizeof(struct qed_simd_fp_handler)); 537 } 538 539 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 540 { 541 tasklet_schedule((struct tasklet_struct *)tasklet); 542 return IRQ_HANDLED; 543 } 544 545 static irqreturn_t qed_single_int(int irq, void *dev_instance) 546 { 547 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 548 struct qed_hwfn *hwfn; 549 irqreturn_t rc = IRQ_NONE; 550 u64 status; 551 int i, j; 552 553 for (i = 0; i < cdev->num_hwfns; i++) { 554 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 555 556 if (!status) 557 continue; 558 559 hwfn = &cdev->hwfns[i]; 560 561 /* Slowpath interrupt */ 562 if (unlikely(status & 0x1)) { 563 tasklet_schedule(hwfn->sp_dpc); 564 status &= ~0x1; 565 rc = IRQ_HANDLED; 566 } 567 568 /* Fastpath interrupts */ 569 for (j = 0; j < 64; j++) { 570 if ((0x2ULL << j) & status) { 571 struct qed_simd_fp_handler *p_handler = 572 &hwfn->simd_proto_handler[j]; 573 574 if (p_handler->func) 575 p_handler->func(p_handler->token); 576 else 577 DP_NOTICE(hwfn, 578 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", 579 j, status); 580 581 status &= ~(0x2ULL << j); 582 rc = IRQ_HANDLED; 583 } 584 } 585 586 if (unlikely(status)) 587 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 588 "got an unknown interrupt status 0x%llx\n", 589 status); 590 } 591 592 return rc; 593 } 594 595 int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 596 { 597 struct qed_dev *cdev = hwfn->cdev; 598 u32 int_mode; 599 int rc = 0; 600 u8 id; 601 602 int_mode = cdev->int_params.out.int_mode; 603 if (int_mode == QED_INT_MODE_MSIX) { 604 id = hwfn->my_id; 605 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 606 id, cdev->pdev->bus->number, 607 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 608 rc = request_irq(cdev->int_params.msix_table[id].vector, 609 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); 610 } else { 611 unsigned long flags = 0; 612 613 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 614 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 615 PCI_FUNC(cdev->pdev->devfn)); 616 617 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 618 flags |= IRQF_SHARED; 619 620 rc = request_irq(cdev->pdev->irq, qed_single_int, 621 flags, cdev->name, cdev); 622 } 623 624 if (rc) 625 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 626 else 627 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 628 "Requested slowpath %s\n", 629 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); 630 631 return rc; 632 } 633 634 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) 635 { 636 /* Calling the disable function will make sure that any 637 * currently-running function is completed. The following call to the 638 * enable function makes this sequence a flush-like operation. 639 */ 640 if (p_hwfn->b_sp_dpc_enabled) { 641 tasklet_disable(p_hwfn->sp_dpc); 642 tasklet_enable(p_hwfn->sp_dpc); 643 } 644 } 645 646 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 647 { 648 struct qed_dev *cdev = p_hwfn->cdev; 649 u8 id = p_hwfn->my_id; 650 u32 int_mode; 651 652 int_mode = cdev->int_params.out.int_mode; 653 if (int_mode == QED_INT_MODE_MSIX) 654 synchronize_irq(cdev->int_params.msix_table[id].vector); 655 else 656 synchronize_irq(cdev->pdev->irq); 657 658 qed_slowpath_tasklet_flush(p_hwfn); 659 } 660 661 static void qed_slowpath_irq_free(struct qed_dev *cdev) 662 { 663 int i; 664 665 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 666 for_each_hwfn(cdev, i) { 667 if (!cdev->hwfns[i].b_int_requested) 668 break; 669 synchronize_irq(cdev->int_params.msix_table[i].vector); 670 free_irq(cdev->int_params.msix_table[i].vector, 671 cdev->hwfns[i].sp_dpc); 672 } 673 } else { 674 if (QED_LEADING_HWFN(cdev)->b_int_requested) 675 free_irq(cdev->pdev->irq, cdev); 676 } 677 qed_int_disable_post_isr_release(cdev); 678 } 679 680 static int qed_nic_stop(struct qed_dev *cdev) 681 { 682 int i, rc; 683 684 rc = qed_hw_stop(cdev); 685 686 for (i = 0; i < cdev->num_hwfns; i++) { 687 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 688 689 if (p_hwfn->b_sp_dpc_enabled) { 690 tasklet_disable(p_hwfn->sp_dpc); 691 p_hwfn->b_sp_dpc_enabled = false; 692 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 693 "Disabled sp tasklet [hwfn %d] at %p\n", 694 i, p_hwfn->sp_dpc); 695 } 696 } 697 698 qed_dbg_pf_exit(cdev); 699 700 return rc; 701 } 702 703 static int qed_nic_setup(struct qed_dev *cdev) 704 { 705 int rc, i; 706 707 /* Determine if interface is going to require LL2 */ 708 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 709 for (i = 0; i < cdev->num_hwfns; i++) { 710 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 711 712 p_hwfn->using_ll2 = true; 713 } 714 } 715 716 rc = qed_resc_alloc(cdev); 717 if (rc) 718 return rc; 719 720 DP_INFO(cdev, "Allocated qed resources\n"); 721 722 qed_resc_setup(cdev); 723 724 return rc; 725 } 726 727 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 728 { 729 int limit = 0; 730 731 /* Mark the fastpath as free/used */ 732 cdev->int_params.fp_initialized = cnt ? true : false; 733 734 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 735 limit = cdev->num_hwfns * 63; 736 else if (cdev->int_params.fp_msix_cnt) 737 limit = cdev->int_params.fp_msix_cnt; 738 739 if (!limit) 740 return -ENOMEM; 741 742 return min_t(int, cnt, limit); 743 } 744 745 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 746 { 747 memset(info, 0, sizeof(struct qed_int_info)); 748 749 if (!cdev->int_params.fp_initialized) { 750 DP_INFO(cdev, 751 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 752 return -EINVAL; 753 } 754 755 /* Need to expose only MSI-X information; Single IRQ is handled solely 756 * by qed. 757 */ 758 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 759 int msix_base = cdev->int_params.fp_msix_base; 760 761 info->msix_cnt = cdev->int_params.fp_msix_cnt; 762 info->msix = &cdev->int_params.msix_table[msix_base]; 763 } 764 765 return 0; 766 } 767 768 static int qed_slowpath_setup_int(struct qed_dev *cdev, 769 enum qed_int_mode int_mode) 770 { 771 struct qed_sb_cnt_info sb_cnt_info; 772 int num_l2_queues = 0; 773 int rc; 774 int i; 775 776 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 777 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 778 return -EINVAL; 779 } 780 781 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 782 cdev->int_params.in.int_mode = int_mode; 783 for_each_hwfn(cdev, i) { 784 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 785 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 786 cdev->int_params.in.num_vectors += sb_cnt_info.cnt; 787 cdev->int_params.in.num_vectors++; /* slowpath */ 788 } 789 790 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 791 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 792 793 if (is_kdump_kernel()) { 794 DP_INFO(cdev, 795 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 796 cdev->int_params.in.min_msix_cnt); 797 cdev->int_params.in.num_vectors = 798 cdev->int_params.in.min_msix_cnt; 799 } 800 801 rc = qed_set_int_mode(cdev, false); 802 if (rc) { 803 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 804 return rc; 805 } 806 807 cdev->int_params.fp_msix_base = cdev->num_hwfns; 808 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 809 cdev->num_hwfns; 810 811 if (!IS_ENABLED(CONFIG_QED_RDMA) || 812 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 813 return 0; 814 815 for_each_hwfn(cdev, i) 816 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 817 818 DP_VERBOSE(cdev, QED_MSG_RDMA, 819 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 820 cdev->int_params.fp_msix_cnt, num_l2_queues); 821 822 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 823 cdev->int_params.rdma_msix_cnt = 824 (cdev->int_params.fp_msix_cnt - num_l2_queues) 825 / cdev->num_hwfns; 826 cdev->int_params.rdma_msix_base = 827 cdev->int_params.fp_msix_base + num_l2_queues; 828 cdev->int_params.fp_msix_cnt = num_l2_queues; 829 } else { 830 cdev->int_params.rdma_msix_cnt = 0; 831 } 832 833 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 834 cdev->int_params.rdma_msix_cnt, 835 cdev->int_params.rdma_msix_base); 836 837 return 0; 838 } 839 840 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 841 { 842 int rc; 843 844 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 845 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 846 847 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 848 &cdev->int_params.in.num_vectors); 849 if (cdev->num_hwfns > 1) { 850 u8 vectors = 0; 851 852 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 853 cdev->int_params.in.num_vectors += vectors; 854 } 855 856 /* We want a minimum of one fastpath vector per vf hwfn */ 857 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 858 859 rc = qed_set_int_mode(cdev, true); 860 if (rc) 861 return rc; 862 863 cdev->int_params.fp_msix_base = 0; 864 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 865 866 return 0; 867 } 868 869 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 870 u8 *input_buf, u32 max_size, u8 *unzip_buf) 871 { 872 int rc; 873 874 p_hwfn->stream->next_in = input_buf; 875 p_hwfn->stream->avail_in = input_len; 876 p_hwfn->stream->next_out = unzip_buf; 877 p_hwfn->stream->avail_out = max_size; 878 879 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 880 881 if (rc != Z_OK) { 882 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 883 rc); 884 return 0; 885 } 886 887 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 888 zlib_inflateEnd(p_hwfn->stream); 889 890 if (rc != Z_OK && rc != Z_STREAM_END) { 891 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 892 p_hwfn->stream->msg, rc); 893 return 0; 894 } 895 896 return p_hwfn->stream->total_out / 4; 897 } 898 899 static int qed_alloc_stream_mem(struct qed_dev *cdev) 900 { 901 int i; 902 void *workspace; 903 904 for_each_hwfn(cdev, i) { 905 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 906 907 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 908 if (!p_hwfn->stream) 909 return -ENOMEM; 910 911 workspace = vzalloc(zlib_inflate_workspacesize()); 912 if (!workspace) 913 return -ENOMEM; 914 p_hwfn->stream->workspace = workspace; 915 } 916 917 return 0; 918 } 919 920 static void qed_free_stream_mem(struct qed_dev *cdev) 921 { 922 int i; 923 924 for_each_hwfn(cdev, i) { 925 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 926 927 if (!p_hwfn->stream) 928 return; 929 930 vfree(p_hwfn->stream->workspace); 931 kfree(p_hwfn->stream); 932 } 933 } 934 935 static void qed_update_pf_params(struct qed_dev *cdev, 936 struct qed_pf_params *params) 937 { 938 int i; 939 940 if (IS_ENABLED(CONFIG_QED_RDMA)) { 941 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 942 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 943 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 944 /* divide by 3 the MRs to avoid MF ILT overflow */ 945 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 946 } 947 948 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 949 params->eth_pf_params.num_arfs_filters = 0; 950 951 /* In case we might support RDMA, don't allow qede to be greedy 952 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 953 * per hwfn. 954 */ 955 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 956 u16 *num_cons; 957 958 num_cons = ¶ms->eth_pf_params.num_cons; 959 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 960 } 961 962 for (i = 0; i < cdev->num_hwfns; i++) { 963 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 964 965 p_hwfn->pf_params = *params; 966 } 967 } 968 969 #define QED_PERIODIC_DB_REC_COUNT 100 970 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 971 #define QED_PERIODIC_DB_REC_INTERVAL \ 972 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 973 #define QED_PERIODIC_DB_REC_WAIT_COUNT 10 974 #define QED_PERIODIC_DB_REC_WAIT_INTERVAL \ 975 (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT) 976 977 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, 978 enum qed_slowpath_wq_flag wq_flag, 979 unsigned long delay) 980 { 981 if (!hwfn->slowpath_wq_active) 982 return -EINVAL; 983 984 /* Memory barrier for setting atomic bit */ 985 smp_mb__before_atomic(); 986 set_bit(wq_flag, &hwfn->slowpath_task_flags); 987 smp_mb__after_atomic(); 988 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); 989 990 return 0; 991 } 992 993 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) 994 { 995 /* Reset periodic Doorbell Recovery counter */ 996 p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; 997 998 /* Don't schedule periodic Doorbell Recovery if already scheduled */ 999 if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1000 &p_hwfn->slowpath_task_flags)) 1001 return; 1002 1003 qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, 1004 QED_PERIODIC_DB_REC_INTERVAL); 1005 } 1006 1007 static void qed_slowpath_wq_stop(struct qed_dev *cdev) 1008 { 1009 int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT; 1010 1011 if (IS_VF(cdev)) 1012 return; 1013 1014 for_each_hwfn(cdev, i) { 1015 if (!cdev->hwfns[i].slowpath_wq) 1016 continue; 1017 1018 /* Stop queuing new delayed works */ 1019 cdev->hwfns[i].slowpath_wq_active = false; 1020 1021 /* Wait until the last periodic doorbell recovery is executed */ 1022 while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1023 &cdev->hwfns[i].slowpath_task_flags) && 1024 sleep_count--) 1025 msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL); 1026 1027 flush_workqueue(cdev->hwfns[i].slowpath_wq); 1028 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 1029 } 1030 } 1031 1032 static void qed_slowpath_task(struct work_struct *work) 1033 { 1034 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 1035 slowpath_task.work); 1036 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 1037 1038 if (!ptt) { 1039 if (hwfn->slowpath_wq_active) 1040 queue_delayed_work(hwfn->slowpath_wq, 1041 &hwfn->slowpath_task, 0); 1042 1043 return; 1044 } 1045 1046 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 1047 &hwfn->slowpath_task_flags)) 1048 qed_mfw_process_tlv_req(hwfn, ptt); 1049 1050 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1051 &hwfn->slowpath_task_flags)) { 1052 qed_db_rec_handler(hwfn, ptt); 1053 if (hwfn->periodic_db_rec_count--) 1054 qed_slowpath_delayed_work(hwfn, 1055 QED_SLOWPATH_PERIODIC_DB_REC, 1056 QED_PERIODIC_DB_REC_INTERVAL); 1057 } 1058 1059 qed_ptt_release(hwfn, ptt); 1060 } 1061 1062 static int qed_slowpath_wq_start(struct qed_dev *cdev) 1063 { 1064 struct qed_hwfn *hwfn; 1065 char name[NAME_SIZE]; 1066 int i; 1067 1068 if (IS_VF(cdev)) 1069 return 0; 1070 1071 for_each_hwfn(cdev, i) { 1072 hwfn = &cdev->hwfns[i]; 1073 1074 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", 1075 cdev->pdev->bus->number, 1076 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 1077 1078 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); 1079 if (!hwfn->slowpath_wq) { 1080 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1081 return -ENOMEM; 1082 } 1083 1084 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1085 hwfn->slowpath_wq_active = true; 1086 } 1087 1088 return 0; 1089 } 1090 1091 static int qed_slowpath_start(struct qed_dev *cdev, 1092 struct qed_slowpath_params *params) 1093 { 1094 struct qed_drv_load_params drv_load_params; 1095 struct qed_hw_init_params hw_init_params; 1096 struct qed_mcp_drv_version drv_version; 1097 struct qed_tunnel_info tunn_info; 1098 const u8 *data = NULL; 1099 struct qed_hwfn *hwfn; 1100 struct qed_ptt *p_ptt; 1101 int rc = -EINVAL; 1102 1103 if (qed_iov_wq_start(cdev)) 1104 goto err; 1105 1106 if (qed_slowpath_wq_start(cdev)) 1107 goto err; 1108 1109 if (IS_PF(cdev)) { 1110 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1111 &cdev->pdev->dev); 1112 if (rc) { 1113 DP_NOTICE(cdev, 1114 "Failed to find fw file - /lib/firmware/%s\n", 1115 QED_FW_FILE_NAME); 1116 goto err; 1117 } 1118 1119 if (cdev->num_hwfns == 1) { 1120 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1121 if (p_ptt) { 1122 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1123 } else { 1124 DP_NOTICE(cdev, 1125 "Failed to acquire PTT for aRFS\n"); 1126 goto err; 1127 } 1128 } 1129 } 1130 1131 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1132 rc = qed_nic_setup(cdev); 1133 if (rc) 1134 goto err; 1135 1136 if (IS_PF(cdev)) 1137 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1138 else 1139 rc = qed_slowpath_vf_setup_int(cdev); 1140 if (rc) 1141 goto err1; 1142 1143 if (IS_PF(cdev)) { 1144 /* Allocate stream for unzipping */ 1145 rc = qed_alloc_stream_mem(cdev); 1146 if (rc) 1147 goto err2; 1148 1149 /* First Dword used to differentiate between various sources */ 1150 data = cdev->firmware->data + sizeof(u32); 1151 1152 qed_dbg_pf_init(cdev); 1153 } 1154 1155 /* Start the slowpath */ 1156 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1157 memset(&tunn_info, 0, sizeof(tunn_info)); 1158 tunn_info.vxlan.b_mode_enabled = true; 1159 tunn_info.l2_gre.b_mode_enabled = true; 1160 tunn_info.ip_gre.b_mode_enabled = true; 1161 tunn_info.l2_geneve.b_mode_enabled = true; 1162 tunn_info.ip_geneve.b_mode_enabled = true; 1163 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1164 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1165 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1166 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1167 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1168 hw_init_params.p_tunn = &tunn_info; 1169 hw_init_params.b_hw_start = true; 1170 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1171 hw_init_params.allow_npar_tx_switch = true; 1172 hw_init_params.bin_fw_data = data; 1173 1174 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1175 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1176 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1177 drv_load_params.avoid_eng_reset = false; 1178 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1179 hw_init_params.p_drv_load_params = &drv_load_params; 1180 1181 rc = qed_hw_init(cdev, &hw_init_params); 1182 if (rc) 1183 goto err2; 1184 1185 DP_INFO(cdev, 1186 "HW initialization and function start completed successfully\n"); 1187 1188 if (IS_PF(cdev)) { 1189 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1190 BIT(QED_MODE_L2GENEVE_TUNN) | 1191 BIT(QED_MODE_IPGENEVE_TUNN) | 1192 BIT(QED_MODE_L2GRE_TUNN) | 1193 BIT(QED_MODE_IPGRE_TUNN)); 1194 } 1195 1196 /* Allocate LL2 interface if needed */ 1197 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1198 rc = qed_ll2_alloc_if(cdev); 1199 if (rc) 1200 goto err3; 1201 } 1202 if (IS_PF(cdev)) { 1203 hwfn = QED_LEADING_HWFN(cdev); 1204 drv_version.version = (params->drv_major << 24) | 1205 (params->drv_minor << 16) | 1206 (params->drv_rev << 8) | 1207 (params->drv_eng); 1208 strlcpy(drv_version.name, params->name, 1209 MCP_DRV_VER_STR_SIZE - 4); 1210 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1211 &drv_version); 1212 if (rc) { 1213 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1214 return rc; 1215 } 1216 } 1217 1218 qed_reset_vport_stats(cdev); 1219 1220 return 0; 1221 1222 err3: 1223 qed_hw_stop(cdev); 1224 err2: 1225 qed_hw_timers_stop_all(cdev); 1226 if (IS_PF(cdev)) 1227 qed_slowpath_irq_free(cdev); 1228 qed_free_stream_mem(cdev); 1229 qed_disable_msix(cdev); 1230 err1: 1231 qed_resc_free(cdev); 1232 err: 1233 if (IS_PF(cdev)) 1234 release_firmware(cdev->firmware); 1235 1236 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1237 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1238 qed_ptt_release(QED_LEADING_HWFN(cdev), 1239 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1240 1241 qed_iov_wq_stop(cdev, false); 1242 1243 qed_slowpath_wq_stop(cdev); 1244 1245 return rc; 1246 } 1247 1248 static int qed_slowpath_stop(struct qed_dev *cdev) 1249 { 1250 if (!cdev) 1251 return -ENODEV; 1252 1253 qed_slowpath_wq_stop(cdev); 1254 1255 qed_ll2_dealloc_if(cdev); 1256 1257 if (IS_PF(cdev)) { 1258 if (cdev->num_hwfns == 1) 1259 qed_ptt_release(QED_LEADING_HWFN(cdev), 1260 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1261 qed_free_stream_mem(cdev); 1262 if (IS_QED_ETH_IF(cdev)) 1263 qed_sriov_disable(cdev, true); 1264 } 1265 1266 qed_nic_stop(cdev); 1267 1268 if (IS_PF(cdev)) 1269 qed_slowpath_irq_free(cdev); 1270 1271 qed_disable_msix(cdev); 1272 1273 qed_resc_free(cdev); 1274 1275 qed_iov_wq_stop(cdev, true); 1276 1277 if (IS_PF(cdev)) 1278 release_firmware(cdev->firmware); 1279 1280 return 0; 1281 } 1282 1283 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1284 { 1285 int i; 1286 1287 memcpy(cdev->name, name, NAME_SIZE); 1288 for_each_hwfn(cdev, i) 1289 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1290 } 1291 1292 static u32 qed_sb_init(struct qed_dev *cdev, 1293 struct qed_sb_info *sb_info, 1294 void *sb_virt_addr, 1295 dma_addr_t sb_phy_addr, u16 sb_id, 1296 enum qed_sb_type type) 1297 { 1298 struct qed_hwfn *p_hwfn; 1299 struct qed_ptt *p_ptt; 1300 int hwfn_index; 1301 u16 rel_sb_id; 1302 u8 n_hwfns; 1303 u32 rc; 1304 1305 /* RoCE uses single engine and CMT uses two engines. When using both 1306 * we force only a single engine. Storage uses only engine 0 too. 1307 */ 1308 if (type == QED_SB_TYPE_L2_QUEUE) 1309 n_hwfns = cdev->num_hwfns; 1310 else 1311 n_hwfns = 1; 1312 1313 hwfn_index = sb_id % n_hwfns; 1314 p_hwfn = &cdev->hwfns[hwfn_index]; 1315 rel_sb_id = sb_id / n_hwfns; 1316 1317 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1318 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1319 hwfn_index, rel_sb_id, sb_id); 1320 1321 if (IS_PF(p_hwfn->cdev)) { 1322 p_ptt = qed_ptt_acquire(p_hwfn); 1323 if (!p_ptt) 1324 return -EBUSY; 1325 1326 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1327 sb_phy_addr, rel_sb_id); 1328 qed_ptt_release(p_hwfn, p_ptt); 1329 } else { 1330 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1331 sb_phy_addr, rel_sb_id); 1332 } 1333 1334 return rc; 1335 } 1336 1337 static u32 qed_sb_release(struct qed_dev *cdev, 1338 struct qed_sb_info *sb_info, u16 sb_id) 1339 { 1340 struct qed_hwfn *p_hwfn; 1341 int hwfn_index; 1342 u16 rel_sb_id; 1343 u32 rc; 1344 1345 hwfn_index = sb_id % cdev->num_hwfns; 1346 p_hwfn = &cdev->hwfns[hwfn_index]; 1347 rel_sb_id = sb_id / cdev->num_hwfns; 1348 1349 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1350 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1351 hwfn_index, rel_sb_id, sb_id); 1352 1353 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1354 1355 return rc; 1356 } 1357 1358 static bool qed_can_link_change(struct qed_dev *cdev) 1359 { 1360 return true; 1361 } 1362 1363 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) 1364 { 1365 struct qed_hwfn *hwfn; 1366 struct qed_mcp_link_params *link_params; 1367 struct qed_ptt *ptt; 1368 u32 sup_caps; 1369 int rc; 1370 1371 if (!cdev) 1372 return -ENODEV; 1373 1374 /* The link should be set only once per PF */ 1375 hwfn = &cdev->hwfns[0]; 1376 1377 /* When VF wants to set link, force it to read the bulletin instead. 1378 * This mimics the PF behavior, where a noitification [both immediate 1379 * and possible later] would be generated when changing properties. 1380 */ 1381 if (IS_VF(cdev)) { 1382 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1383 return 0; 1384 } 1385 1386 ptt = qed_ptt_acquire(hwfn); 1387 if (!ptt) 1388 return -EBUSY; 1389 1390 link_params = qed_mcp_get_link_params(hwfn); 1391 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1392 link_params->speed.autoneg = params->autoneg; 1393 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1394 link_params->speed.advertised_speeds = 0; 1395 sup_caps = QED_LM_1000baseT_Full_BIT | 1396 QED_LM_1000baseKX_Full_BIT | 1397 QED_LM_1000baseX_Full_BIT; 1398 if (params->adv_speeds & sup_caps) 1399 link_params->speed.advertised_speeds |= 1400 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 1401 sup_caps = QED_LM_10000baseT_Full_BIT | 1402 QED_LM_10000baseKR_Full_BIT | 1403 QED_LM_10000baseKX4_Full_BIT | 1404 QED_LM_10000baseR_FEC_BIT | 1405 QED_LM_10000baseCR_Full_BIT | 1406 QED_LM_10000baseSR_Full_BIT | 1407 QED_LM_10000baseLR_Full_BIT | 1408 QED_LM_10000baseLRM_Full_BIT; 1409 if (params->adv_speeds & sup_caps) 1410 link_params->speed.advertised_speeds |= 1411 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 1412 if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) 1413 link_params->speed.advertised_speeds |= 1414 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 1415 sup_caps = QED_LM_25000baseKR_Full_BIT | 1416 QED_LM_25000baseCR_Full_BIT | 1417 QED_LM_25000baseSR_Full_BIT; 1418 if (params->adv_speeds & sup_caps) 1419 link_params->speed.advertised_speeds |= 1420 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 1421 sup_caps = QED_LM_40000baseLR4_Full_BIT | 1422 QED_LM_40000baseKR4_Full_BIT | 1423 QED_LM_40000baseCR4_Full_BIT | 1424 QED_LM_40000baseSR4_Full_BIT; 1425 if (params->adv_speeds & sup_caps) 1426 link_params->speed.advertised_speeds |= 1427 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 1428 sup_caps = QED_LM_50000baseKR2_Full_BIT | 1429 QED_LM_50000baseCR2_Full_BIT | 1430 QED_LM_50000baseSR2_Full_BIT; 1431 if (params->adv_speeds & sup_caps) 1432 link_params->speed.advertised_speeds |= 1433 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 1434 sup_caps = QED_LM_100000baseKR4_Full_BIT | 1435 QED_LM_100000baseSR4_Full_BIT | 1436 QED_LM_100000baseCR4_Full_BIT | 1437 QED_LM_100000baseLR4_ER4_Full_BIT; 1438 if (params->adv_speeds & sup_caps) 1439 link_params->speed.advertised_speeds |= 1440 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 1441 } 1442 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1443 link_params->speed.forced_speed = params->forced_speed; 1444 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1445 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1446 link_params->pause.autoneg = true; 1447 else 1448 link_params->pause.autoneg = false; 1449 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1450 link_params->pause.forced_rx = true; 1451 else 1452 link_params->pause.forced_rx = false; 1453 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1454 link_params->pause.forced_tx = true; 1455 else 1456 link_params->pause.forced_tx = false; 1457 } 1458 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1459 switch (params->loopback_mode) { 1460 case QED_LINK_LOOPBACK_INT_PHY: 1461 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1462 break; 1463 case QED_LINK_LOOPBACK_EXT_PHY: 1464 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1465 break; 1466 case QED_LINK_LOOPBACK_EXT: 1467 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1468 break; 1469 case QED_LINK_LOOPBACK_MAC: 1470 link_params->loopback_mode = ETH_LOOPBACK_MAC; 1471 break; 1472 default: 1473 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1474 break; 1475 } 1476 } 1477 1478 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1479 memcpy(&link_params->eee, ¶ms->eee, 1480 sizeof(link_params->eee)); 1481 1482 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1483 1484 qed_ptt_release(hwfn, ptt); 1485 1486 return rc; 1487 } 1488 1489 static int qed_get_port_type(u32 media_type) 1490 { 1491 int port_type; 1492 1493 switch (media_type) { 1494 case MEDIA_SFPP_10G_FIBER: 1495 case MEDIA_SFP_1G_FIBER: 1496 case MEDIA_XFP_FIBER: 1497 case MEDIA_MODULE_FIBER: 1498 case MEDIA_KR: 1499 port_type = PORT_FIBRE; 1500 break; 1501 case MEDIA_DA_TWINAX: 1502 port_type = PORT_DA; 1503 break; 1504 case MEDIA_BASE_T: 1505 port_type = PORT_TP; 1506 break; 1507 case MEDIA_NOT_PRESENT: 1508 port_type = PORT_NONE; 1509 break; 1510 case MEDIA_UNSPECIFIED: 1511 default: 1512 port_type = PORT_OTHER; 1513 break; 1514 } 1515 return port_type; 1516 } 1517 1518 static int qed_get_link_data(struct qed_hwfn *hwfn, 1519 struct qed_mcp_link_params *params, 1520 struct qed_mcp_link_state *link, 1521 struct qed_mcp_link_capabilities *link_caps) 1522 { 1523 void *p; 1524 1525 if (!IS_PF(hwfn->cdev)) { 1526 qed_vf_get_link_params(hwfn, params); 1527 qed_vf_get_link_state(hwfn, link); 1528 qed_vf_get_link_caps(hwfn, link_caps); 1529 1530 return 0; 1531 } 1532 1533 p = qed_mcp_get_link_params(hwfn); 1534 if (!p) 1535 return -ENXIO; 1536 memcpy(params, p, sizeof(*params)); 1537 1538 p = qed_mcp_get_link_state(hwfn); 1539 if (!p) 1540 return -ENXIO; 1541 memcpy(link, p, sizeof(*link)); 1542 1543 p = qed_mcp_get_link_capabilities(hwfn); 1544 if (!p) 1545 return -ENXIO; 1546 memcpy(link_caps, p, sizeof(*link_caps)); 1547 1548 return 0; 1549 } 1550 1551 static void qed_fill_link_capability(struct qed_hwfn *hwfn, 1552 struct qed_ptt *ptt, u32 capability, 1553 u32 *if_capability) 1554 { 1555 u32 media_type, tcvr_state, tcvr_type; 1556 u32 speed_mask, board_cfg; 1557 1558 if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) 1559 media_type = MEDIA_UNSPECIFIED; 1560 1561 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) 1562 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; 1563 1564 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) 1565 speed_mask = 0xFFFFFFFF; 1566 1567 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) 1568 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 1569 1570 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 1571 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", 1572 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); 1573 1574 switch (media_type) { 1575 case MEDIA_DA_TWINAX: 1576 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1577 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1578 /* For DAC media multiple speed capabilities are supported*/ 1579 capability = capability & speed_mask; 1580 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1581 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1582 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1583 *if_capability |= QED_LM_10000baseCR_Full_BIT; 1584 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1585 *if_capability |= QED_LM_40000baseCR4_Full_BIT; 1586 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1587 *if_capability |= QED_LM_25000baseCR_Full_BIT; 1588 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1589 *if_capability |= QED_LM_50000baseCR2_Full_BIT; 1590 if (capability & 1591 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1592 *if_capability |= QED_LM_100000baseCR4_Full_BIT; 1593 break; 1594 case MEDIA_BASE_T: 1595 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { 1596 if (capability & 1597 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { 1598 *if_capability |= QED_LM_1000baseT_Full_BIT; 1599 } 1600 if (capability & 1601 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { 1602 *if_capability |= QED_LM_10000baseT_Full_BIT; 1603 } 1604 } 1605 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { 1606 if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) 1607 *if_capability |= QED_LM_1000baseT_Full_BIT; 1608 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) 1609 *if_capability |= QED_LM_10000baseT_Full_BIT; 1610 } 1611 break; 1612 case MEDIA_SFP_1G_FIBER: 1613 case MEDIA_SFPP_10G_FIBER: 1614 case MEDIA_XFP_FIBER: 1615 case MEDIA_MODULE_FIBER: 1616 if (capability & 1617 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { 1618 if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || 1619 (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX)) 1620 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1621 } 1622 if (capability & 1623 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { 1624 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR) 1625 *if_capability |= QED_LM_10000baseSR_Full_BIT; 1626 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR) 1627 *if_capability |= QED_LM_10000baseLR_Full_BIT; 1628 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM) 1629 *if_capability |= QED_LM_10000baseLRM_Full_BIT; 1630 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER) 1631 *if_capability |= QED_LM_10000baseR_FEC_BIT; 1632 } 1633 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1634 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1635 if (capability & 1636 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { 1637 if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR) 1638 *if_capability |= QED_LM_25000baseSR_Full_BIT; 1639 } 1640 if (capability & 1641 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { 1642 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4) 1643 *if_capability |= QED_LM_40000baseLR4_Full_BIT; 1644 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4) 1645 *if_capability |= QED_LM_40000baseSR4_Full_BIT; 1646 } 1647 if (capability & 1648 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1649 *if_capability |= QED_LM_50000baseKR2_Full_BIT; 1650 if (capability & 1651 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) { 1652 if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4) 1653 *if_capability |= QED_LM_100000baseSR4_Full_BIT; 1654 } 1655 1656 break; 1657 case MEDIA_KR: 1658 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1659 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1660 if (capability & 1661 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1662 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1663 if (capability & 1664 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1665 *if_capability |= QED_LM_10000baseKR_Full_BIT; 1666 if (capability & 1667 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1668 *if_capability |= QED_LM_25000baseKR_Full_BIT; 1669 if (capability & 1670 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1671 *if_capability |= QED_LM_40000baseKR4_Full_BIT; 1672 if (capability & 1673 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1674 *if_capability |= QED_LM_50000baseKR2_Full_BIT; 1675 if (capability & 1676 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1677 *if_capability |= QED_LM_100000baseKR4_Full_BIT; 1678 break; 1679 case MEDIA_UNSPECIFIED: 1680 case MEDIA_NOT_PRESENT: 1681 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, 1682 "Unknown media and transceiver type;\n"); 1683 break; 1684 } 1685 } 1686 1687 static void qed_fill_link(struct qed_hwfn *hwfn, 1688 struct qed_ptt *ptt, 1689 struct qed_link_output *if_link) 1690 { 1691 struct qed_mcp_link_capabilities link_caps; 1692 struct qed_mcp_link_params params; 1693 struct qed_mcp_link_state link; 1694 u32 media_type; 1695 1696 memset(if_link, 0, sizeof(*if_link)); 1697 1698 /* Prepare source inputs */ 1699 if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { 1700 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 1701 return; 1702 } 1703 1704 /* Set the link parameters to pass to protocol driver */ 1705 if (link.link_up) 1706 if_link->link_up = true; 1707 1708 /* TODO - at the moment assume supported and advertised speed equal */ 1709 if_link->supported_caps = QED_LM_FIBRE_BIT; 1710 if (link_caps.default_speed_autoneg) 1711 if_link->supported_caps |= QED_LM_Autoneg_BIT; 1712 if (params.pause.autoneg || 1713 (params.pause.forced_rx && params.pause.forced_tx)) 1714 if_link->supported_caps |= QED_LM_Asym_Pause_BIT; 1715 if (params.pause.autoneg || params.pause.forced_rx || 1716 params.pause.forced_tx) 1717 if_link->supported_caps |= QED_LM_Pause_BIT; 1718 1719 if_link->advertised_caps = if_link->supported_caps; 1720 if (params.speed.autoneg) 1721 if_link->advertised_caps |= QED_LM_Autoneg_BIT; 1722 else 1723 if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; 1724 1725 /* Fill link advertised capability*/ 1726 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 1727 &if_link->advertised_caps); 1728 /* Fill link supported capability*/ 1729 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 1730 &if_link->supported_caps); 1731 1732 if (link.link_up) 1733 if_link->speed = link.speed; 1734 1735 /* TODO - fill duplex properly */ 1736 if_link->duplex = DUPLEX_FULL; 1737 qed_mcp_get_media_type(hwfn, ptt, &media_type); 1738 if_link->port = qed_get_port_type(media_type); 1739 1740 if_link->autoneg = params.speed.autoneg; 1741 1742 if (params.pause.autoneg) 1743 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1744 if (params.pause.forced_rx) 1745 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1746 if (params.pause.forced_tx) 1747 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1748 1749 /* Link partner capabilities */ 1750 if (link.partner_adv_speed & 1751 QED_LINK_PARTNER_SPEED_1G_FD) 1752 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; 1753 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) 1754 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; 1755 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) 1756 if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; 1757 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) 1758 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; 1759 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) 1760 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; 1761 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) 1762 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; 1763 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) 1764 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; 1765 1766 if (link.an_complete) 1767 if_link->lp_caps |= QED_LM_Autoneg_BIT; 1768 1769 if (link.partner_adv_pause) 1770 if_link->lp_caps |= QED_LM_Pause_BIT; 1771 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 1772 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 1773 if_link->lp_caps |= QED_LM_Asym_Pause_BIT; 1774 1775 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 1776 if_link->eee_supported = false; 1777 } else { 1778 if_link->eee_supported = true; 1779 if_link->eee_active = link.eee_active; 1780 if_link->sup_caps = link_caps.eee_speed_caps; 1781 /* MFW clears adv_caps on eee disable; use configured value */ 1782 if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : 1783 params.eee.adv_caps; 1784 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 1785 if_link->eee.enable = params.eee.enable; 1786 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 1787 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 1788 } 1789 } 1790 1791 static void qed_get_current_link(struct qed_dev *cdev, 1792 struct qed_link_output *if_link) 1793 { 1794 struct qed_hwfn *hwfn; 1795 struct qed_ptt *ptt; 1796 int i; 1797 1798 hwfn = &cdev->hwfns[0]; 1799 if (IS_PF(cdev)) { 1800 ptt = qed_ptt_acquire(hwfn); 1801 if (ptt) { 1802 qed_fill_link(hwfn, ptt, if_link); 1803 qed_ptt_release(hwfn, ptt); 1804 } else { 1805 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 1806 } 1807 } else { 1808 qed_fill_link(hwfn, NULL, if_link); 1809 } 1810 1811 for_each_hwfn(cdev, i) 1812 qed_inform_vf_link_state(&cdev->hwfns[i]); 1813 } 1814 1815 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 1816 { 1817 void *cookie = hwfn->cdev->ops_cookie; 1818 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 1819 struct qed_link_output if_link; 1820 1821 qed_fill_link(hwfn, ptt, &if_link); 1822 qed_inform_vf_link_state(hwfn); 1823 1824 if (IS_LEAD_HWFN(hwfn) && cookie) 1825 op->link_update(cookie, &if_link); 1826 } 1827 1828 static int qed_drain(struct qed_dev *cdev) 1829 { 1830 struct qed_hwfn *hwfn; 1831 struct qed_ptt *ptt; 1832 int i, rc; 1833 1834 if (IS_VF(cdev)) 1835 return 0; 1836 1837 for_each_hwfn(cdev, i) { 1838 hwfn = &cdev->hwfns[i]; 1839 ptt = qed_ptt_acquire(hwfn); 1840 if (!ptt) { 1841 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 1842 return -EBUSY; 1843 } 1844 rc = qed_mcp_drain(hwfn, ptt); 1845 qed_ptt_release(hwfn, ptt); 1846 if (rc) 1847 return rc; 1848 } 1849 1850 return 0; 1851 } 1852 1853 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 1854 struct qed_nvm_image_att *nvm_image, 1855 u32 *crc) 1856 { 1857 u8 *buf = NULL; 1858 int rc, j; 1859 u32 val; 1860 1861 /* Allocate a buffer for holding the nvram image */ 1862 buf = kzalloc(nvm_image->length, GFP_KERNEL); 1863 if (!buf) 1864 return -ENOMEM; 1865 1866 /* Read image into buffer */ 1867 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 1868 buf, nvm_image->length); 1869 if (rc) { 1870 DP_ERR(cdev, "Failed reading image from nvm\n"); 1871 goto out; 1872 } 1873 1874 /* Convert the buffer into big-endian format (excluding the 1875 * closing 4 bytes of CRC). 1876 */ 1877 for (j = 0; j < nvm_image->length - 4; j += 4) { 1878 val = cpu_to_be32(*(u32 *)&buf[j]); 1879 *(u32 *)&buf[j] = val; 1880 } 1881 1882 /* Calc CRC for the "actual" image buffer, i.e. not including 1883 * the last 4 CRC bytes. 1884 */ 1885 *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4))); 1886 1887 out: 1888 kfree(buf); 1889 1890 return rc; 1891 } 1892 1893 /* Binary file format - 1894 * /----------------------------------------------------------------------\ 1895 * 0B | 0x4 [command index] | 1896 * 4B | image_type | Options | Number of register settings | 1897 * 8B | Value | 1898 * 12B | Mask | 1899 * 16B | Offset | 1900 * \----------------------------------------------------------------------/ 1901 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 1902 * Options - 0'b - Calculate & Update CRC for image 1903 */ 1904 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 1905 bool *check_resp) 1906 { 1907 struct qed_nvm_image_att nvm_image; 1908 struct qed_hwfn *p_hwfn; 1909 bool is_crc = false; 1910 u32 image_type; 1911 int rc = 0, i; 1912 u16 len; 1913 1914 *data += 4; 1915 image_type = **data; 1916 p_hwfn = QED_LEADING_HWFN(cdev); 1917 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 1918 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 1919 break; 1920 if (i == p_hwfn->nvm_info.num_images) { 1921 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 1922 image_type); 1923 return -ENOENT; 1924 } 1925 1926 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 1927 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 1928 1929 DP_VERBOSE(cdev, NETIF_MSG_DRV, 1930 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 1931 **data, image_type, nvm_image.start_addr, 1932 nvm_image.start_addr + nvm_image.length - 1); 1933 (*data)++; 1934 is_crc = !!(**data & BIT(0)); 1935 (*data)++; 1936 len = *((u16 *)*data); 1937 *data += 2; 1938 if (is_crc) { 1939 u32 crc = 0; 1940 1941 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 1942 if (rc) { 1943 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 1944 goto exit; 1945 } 1946 1947 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 1948 (nvm_image.start_addr + 1949 nvm_image.length - 4), (u8 *)&crc, 4); 1950 if (rc) 1951 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 1952 nvm_image.start_addr + nvm_image.length - 4, rc); 1953 goto exit; 1954 } 1955 1956 /* Iterate over the values for setting */ 1957 while (len) { 1958 u32 offset, mask, value, cur_value; 1959 u8 buf[4]; 1960 1961 value = *((u32 *)*data); 1962 *data += 4; 1963 mask = *((u32 *)*data); 1964 *data += 4; 1965 offset = *((u32 *)*data); 1966 *data += 4; 1967 1968 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 1969 4); 1970 if (rc) { 1971 DP_ERR(cdev, "Failed reading from %08x\n", 1972 nvm_image.start_addr + offset); 1973 goto exit; 1974 } 1975 1976 cur_value = le32_to_cpu(*((__le32 *)buf)); 1977 DP_VERBOSE(cdev, NETIF_MSG_DRV, 1978 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 1979 nvm_image.start_addr + offset, cur_value, 1980 (cur_value & ~mask) | (value & mask), value, mask); 1981 value = (value & mask) | (cur_value & ~mask); 1982 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 1983 nvm_image.start_addr + offset, 1984 (u8 *)&value, 4); 1985 if (rc) { 1986 DP_ERR(cdev, "Failed writing to %08x\n", 1987 nvm_image.start_addr + offset); 1988 goto exit; 1989 } 1990 1991 len--; 1992 } 1993 exit: 1994 return rc; 1995 } 1996 1997 /* Binary file format - 1998 * /----------------------------------------------------------------------\ 1999 * 0B | 0x3 [command index] | 2000 * 4B | b'0: check_response? | b'1-31 reserved | 2001 * 8B | File-type | reserved | 2002 * 12B | Image length in bytes | 2003 * \----------------------------------------------------------------------/ 2004 * Start a new file of the provided type 2005 */ 2006 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 2007 const u8 **data, bool *check_resp) 2008 { 2009 u32 file_type, file_size = 0; 2010 int rc; 2011 2012 *data += 4; 2013 *check_resp = !!(**data & BIT(0)); 2014 *data += 4; 2015 file_type = **data; 2016 2017 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2018 "About to start a new file of type %02x\n", file_type); 2019 if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { 2020 *data += 4; 2021 file_size = *((u32 *)(*data)); 2022 } 2023 2024 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, 2025 (u8 *)(&file_size), 4); 2026 *data += 4; 2027 2028 return rc; 2029 } 2030 2031 /* Binary file format - 2032 * /----------------------------------------------------------------------\ 2033 * 0B | 0x2 [command index] | 2034 * 4B | Length in bytes | 2035 * 8B | b'0: check_response? | b'1-31 reserved | 2036 * 12B | Offset in bytes | 2037 * 16B | Data ... | 2038 * \----------------------------------------------------------------------/ 2039 * Write data as part of a file that was previously started. Data should be 2040 * of length equal to that provided in the message 2041 */ 2042 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 2043 const u8 **data, bool *check_resp) 2044 { 2045 u32 offset, len; 2046 int rc; 2047 2048 *data += 4; 2049 len = *((u32 *)(*data)); 2050 *data += 4; 2051 *check_resp = !!(**data & BIT(0)); 2052 *data += 4; 2053 offset = *((u32 *)(*data)); 2054 *data += 4; 2055 2056 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2057 "About to write File-data: %08x bytes to offset %08x\n", 2058 len, offset); 2059 2060 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 2061 (char *)(*data), len); 2062 *data += len; 2063 2064 return rc; 2065 } 2066 2067 /* Binary file format [General header] - 2068 * /----------------------------------------------------------------------\ 2069 * 0B | QED_NVM_SIGNATURE | 2070 * 4B | Length in bytes | 2071 * 8B | Highest command in this batchfile | Reserved | 2072 * \----------------------------------------------------------------------/ 2073 */ 2074 static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 2075 const struct firmware *image, 2076 const u8 **data) 2077 { 2078 u32 signature, len; 2079 2080 /* Check minimum size */ 2081 if (image->size < 12) { 2082 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 2083 return -EINVAL; 2084 } 2085 2086 /* Check signature */ 2087 signature = *((u32 *)(*data)); 2088 if (signature != QED_NVM_SIGNATURE) { 2089 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 2090 return -EINVAL; 2091 } 2092 2093 *data += 4; 2094 /* Validate internal size equals the image-size */ 2095 len = *((u32 *)(*data)); 2096 if (len != image->size) { 2097 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 2098 len, (u32)image->size); 2099 return -EINVAL; 2100 } 2101 2102 *data += 4; 2103 /* Make sure driver familiar with all commands necessary for this */ 2104 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 2105 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", 2106 *((u16 *)(*data))); 2107 return -EINVAL; 2108 } 2109 2110 *data += 4; 2111 2112 return 0; 2113 } 2114 2115 static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 2116 { 2117 const struct firmware *image; 2118 const u8 *data, *data_end; 2119 u32 cmd_type; 2120 int rc; 2121 2122 rc = request_firmware(&image, name, &cdev->pdev->dev); 2123 if (rc) { 2124 DP_ERR(cdev, "Failed to find '%s'\n", name); 2125 return rc; 2126 } 2127 2128 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2129 "Flashing '%s' - firmware's data at %p, size is %08x\n", 2130 name, image->data, (u32)image->size); 2131 data = image->data; 2132 data_end = data + image->size; 2133 2134 rc = qed_nvm_flash_image_validate(cdev, image, &data); 2135 if (rc) 2136 goto exit; 2137 2138 while (data < data_end) { 2139 bool check_resp = false; 2140 2141 /* Parse the actual command */ 2142 cmd_type = *((u32 *)data); 2143 switch (cmd_type) { 2144 case QED_NVM_FLASH_CMD_FILE_DATA: 2145 rc = qed_nvm_flash_image_file_data(cdev, &data, 2146 &check_resp); 2147 break; 2148 case QED_NVM_FLASH_CMD_FILE_START: 2149 rc = qed_nvm_flash_image_file_start(cdev, &data, 2150 &check_resp); 2151 break; 2152 case QED_NVM_FLASH_CMD_NVM_CHANGE: 2153 rc = qed_nvm_flash_image_access(cdev, &data, 2154 &check_resp); 2155 break; 2156 default: 2157 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 2158 rc = -EINVAL; 2159 goto exit; 2160 } 2161 2162 if (rc) { 2163 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 2164 goto exit; 2165 } 2166 2167 /* Check response if needed */ 2168 if (check_resp) { 2169 u32 mcp_response = 0; 2170 2171 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 2172 DP_ERR(cdev, "Failed getting MCP response\n"); 2173 rc = -EINVAL; 2174 goto exit; 2175 } 2176 2177 switch (mcp_response & FW_MSG_CODE_MASK) { 2178 case FW_MSG_CODE_OK: 2179 case FW_MSG_CODE_NVM_OK: 2180 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 2181 case FW_MSG_CODE_PHY_OK: 2182 break; 2183 default: 2184 DP_ERR(cdev, "MFW returns error: %08x\n", 2185 mcp_response); 2186 rc = -EINVAL; 2187 goto exit; 2188 } 2189 } 2190 } 2191 2192 exit: 2193 release_firmware(image); 2194 2195 return rc; 2196 } 2197 2198 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 2199 u8 *buf, u16 len) 2200 { 2201 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2202 2203 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 2204 } 2205 2206 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2207 void *handle) 2208 { 2209 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2210 } 2211 2212 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2213 { 2214 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2215 struct qed_ptt *ptt; 2216 int status = 0; 2217 2218 ptt = qed_ptt_acquire(hwfn); 2219 if (!ptt) 2220 return -EAGAIN; 2221 2222 status = qed_mcp_set_led(hwfn, ptt, mode); 2223 2224 qed_ptt_release(hwfn, ptt); 2225 2226 return status; 2227 } 2228 2229 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2230 { 2231 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2232 struct qed_ptt *ptt; 2233 int rc = 0; 2234 2235 if (IS_VF(cdev)) 2236 return 0; 2237 2238 ptt = qed_ptt_acquire(hwfn); 2239 if (!ptt) 2240 return -EAGAIN; 2241 2242 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 2243 : QED_OV_WOL_DISABLED); 2244 if (rc) 2245 goto out; 2246 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2247 2248 out: 2249 qed_ptt_release(hwfn, ptt); 2250 return rc; 2251 } 2252 2253 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2254 { 2255 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2256 struct qed_ptt *ptt; 2257 int status = 0; 2258 2259 if (IS_VF(cdev)) 2260 return 0; 2261 2262 ptt = qed_ptt_acquire(hwfn); 2263 if (!ptt) 2264 return -EAGAIN; 2265 2266 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 2267 QED_OV_DRIVER_STATE_ACTIVE : 2268 QED_OV_DRIVER_STATE_DISABLED); 2269 2270 qed_ptt_release(hwfn, ptt); 2271 2272 return status; 2273 } 2274 2275 static int qed_update_mac(struct qed_dev *cdev, u8 *mac) 2276 { 2277 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2278 struct qed_ptt *ptt; 2279 int status = 0; 2280 2281 if (IS_VF(cdev)) 2282 return 0; 2283 2284 ptt = qed_ptt_acquire(hwfn); 2285 if (!ptt) 2286 return -EAGAIN; 2287 2288 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 2289 if (status) 2290 goto out; 2291 2292 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2293 2294 out: 2295 qed_ptt_release(hwfn, ptt); 2296 return status; 2297 } 2298 2299 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 2300 { 2301 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2302 struct qed_ptt *ptt; 2303 int status = 0; 2304 2305 if (IS_VF(cdev)) 2306 return 0; 2307 2308 ptt = qed_ptt_acquire(hwfn); 2309 if (!ptt) 2310 return -EAGAIN; 2311 2312 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 2313 if (status) 2314 goto out; 2315 2316 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2317 2318 out: 2319 qed_ptt_release(hwfn, ptt); 2320 return status; 2321 } 2322 2323 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, 2324 u8 dev_addr, u32 offset, u32 len) 2325 { 2326 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2327 struct qed_ptt *ptt; 2328 int rc = 0; 2329 2330 if (IS_VF(cdev)) 2331 return 0; 2332 2333 ptt = qed_ptt_acquire(hwfn); 2334 if (!ptt) 2335 return -EAGAIN; 2336 2337 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, 2338 offset, len, buf); 2339 2340 qed_ptt_release(hwfn, ptt); 2341 2342 return rc; 2343 } 2344 2345 static struct qed_selftest_ops qed_selftest_ops_pass = { 2346 .selftest_memory = &qed_selftest_memory, 2347 .selftest_interrupt = &qed_selftest_interrupt, 2348 .selftest_register = &qed_selftest_register, 2349 .selftest_clock = &qed_selftest_clock, 2350 .selftest_nvram = &qed_selftest_nvram, 2351 }; 2352 2353 const struct qed_common_ops qed_common_ops_pass = { 2354 .selftest = &qed_selftest_ops_pass, 2355 .probe = &qed_probe, 2356 .remove = &qed_remove, 2357 .set_power_state = &qed_set_power_state, 2358 .set_name = &qed_set_name, 2359 .update_pf_params = &qed_update_pf_params, 2360 .slowpath_start = &qed_slowpath_start, 2361 .slowpath_stop = &qed_slowpath_stop, 2362 .set_fp_int = &qed_set_int_fp, 2363 .get_fp_int = &qed_get_int_fp, 2364 .sb_init = &qed_sb_init, 2365 .sb_release = &qed_sb_release, 2366 .simd_handler_config = &qed_simd_handler_config, 2367 .simd_handler_clean = &qed_simd_handler_clean, 2368 .dbg_grc = &qed_dbg_grc, 2369 .dbg_grc_size = &qed_dbg_grc_size, 2370 .can_link_change = &qed_can_link_change, 2371 .set_link = &qed_set_link, 2372 .get_link = &qed_get_current_link, 2373 .drain = &qed_drain, 2374 .update_msglvl = &qed_init_dp, 2375 .dbg_all_data = &qed_dbg_all_data, 2376 .dbg_all_data_size = &qed_dbg_all_data_size, 2377 .chain_alloc = &qed_chain_alloc, 2378 .chain_free = &qed_chain_free, 2379 .nvm_flash = &qed_nvm_flash, 2380 .nvm_get_image = &qed_nvm_get_image, 2381 .set_coalesce = &qed_set_coalesce, 2382 .set_led = &qed_set_led, 2383 .update_drv_state = &qed_update_drv_state, 2384 .update_mac = &qed_update_mac, 2385 .update_mtu = &qed_update_mtu, 2386 .update_wol = &qed_update_wol, 2387 .db_recovery_add = &qed_db_recovery_add, 2388 .db_recovery_del = &qed_db_recovery_del, 2389 .read_module_eeprom = &qed_read_module_eeprom, 2390 }; 2391 2392 void qed_get_protocol_stats(struct qed_dev *cdev, 2393 enum qed_mcp_protocol_type type, 2394 union qed_mcp_protocol_stats *stats) 2395 { 2396 struct qed_eth_stats eth_stats; 2397 2398 memset(stats, 0, sizeof(*stats)); 2399 2400 switch (type) { 2401 case QED_MCP_LAN_STATS: 2402 qed_get_vport_stats(cdev, ð_stats); 2403 stats->lan_stats.ucast_rx_pkts = 2404 eth_stats.common.rx_ucast_pkts; 2405 stats->lan_stats.ucast_tx_pkts = 2406 eth_stats.common.tx_ucast_pkts; 2407 stats->lan_stats.fcs_err = -1; 2408 break; 2409 case QED_MCP_FCOE_STATS: 2410 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); 2411 break; 2412 case QED_MCP_ISCSI_STATS: 2413 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 2414 break; 2415 default: 2416 DP_VERBOSE(cdev, QED_MSG_SP, 2417 "Invalid protocol type = %d\n", type); 2418 return; 2419 } 2420 } 2421 2422 int qed_mfw_tlv_req(struct qed_hwfn *hwfn) 2423 { 2424 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 2425 "Scheduling slowpath task [Flag: %d]\n", 2426 QED_SLOWPATH_MFW_TLV_REQ); 2427 smp_mb__before_atomic(); 2428 set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); 2429 smp_mb__after_atomic(); 2430 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); 2431 2432 return 0; 2433 } 2434 2435 static void 2436 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) 2437 { 2438 struct qed_common_cb_ops *op = cdev->protocol_ops.common; 2439 struct qed_eth_stats_common *p_common; 2440 struct qed_generic_tlvs gen_tlvs; 2441 struct qed_eth_stats stats; 2442 int i; 2443 2444 memset(&gen_tlvs, 0, sizeof(gen_tlvs)); 2445 op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); 2446 2447 if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) 2448 tlv->flags.ipv4_csum_offload = true; 2449 if (gen_tlvs.feat_flags & QED_TLV_LSO) 2450 tlv->flags.lso_supported = true; 2451 tlv->flags.b_set = true; 2452 2453 for (i = 0; i < QED_TLV_MAC_COUNT; i++) { 2454 if (is_valid_ether_addr(gen_tlvs.mac[i])) { 2455 ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); 2456 tlv->mac_set[i] = true; 2457 } 2458 } 2459 2460 qed_get_vport_stats(cdev, &stats); 2461 p_common = &stats.common; 2462 tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 2463 p_common->rx_bcast_pkts; 2464 tlv->rx_frames_set = true; 2465 tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + 2466 p_common->rx_bcast_bytes; 2467 tlv->rx_bytes_set = true; 2468 tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + 2469 p_common->tx_bcast_pkts; 2470 tlv->tx_frames_set = true; 2471 tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + 2472 p_common->tx_bcast_bytes; 2473 tlv->rx_bytes_set = true; 2474 } 2475 2476 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, 2477 union qed_mfw_tlv_data *tlv_buf) 2478 { 2479 struct qed_dev *cdev = hwfn->cdev; 2480 struct qed_common_cb_ops *ops; 2481 2482 ops = cdev->protocol_ops.common; 2483 if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { 2484 DP_NOTICE(hwfn, "Can't collect TLV management info\n"); 2485 return -EINVAL; 2486 } 2487 2488 switch (type) { 2489 case QED_MFW_TLV_GENERIC: 2490 qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); 2491 break; 2492 case QED_MFW_TLV_ETH: 2493 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); 2494 break; 2495 case QED_MFW_TLV_FCOE: 2496 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); 2497 break; 2498 case QED_MFW_TLV_ISCSI: 2499 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); 2500 break; 2501 default: 2502 break; 2503 } 2504 2505 return 0; 2506 } 2507