/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);

struct oct_intrmod_resp {
	u64 rh;
	struct oct_intrmod_cfg intrmod;
	u64 status;
};

struct oct_mdio_cmd_resp {
	u64 rh;
	struct oct_mdio_cmd resp;
	u64 status;
};

#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))

/* Octeon's interface mode of operation */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};

#define OCT_ETHTOOL_REGDUMP_LEN		4096
#define OCT_ETHTOOL_REGDUMP_LEN_23XX	(4096 * 11)
#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF	(4096 * 2)
#define OCT_ETHTOOL_REGSVER		1

/* statistics of PF */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",

	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_pki",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",

	"tx_mcast",
	"tx_bcast",

	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collision",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferral_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_mcast",
	"rx_bcast",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_vxlan",
	"rx_vxlan_err",

	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
"rx_fwd_rate", 143 144 "mac_rx_total_rcvd", 145 "mac_rx_bytes", 146 "mac_rx_total_bcst", 147 "mac_rx_total_mcst", 148 "mac_rx_runts", 149 "mac_rx_ctl_packets", 150 "mac_rx_fifo_err", 151 "mac_rx_dma_drop", 152 "mac_rx_fcs_err", 153 154 "link_state_changes", 155 }; 156 157 /* statistics of VF */ 158 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { 159 "rx_packets", 160 "tx_packets", 161 "rx_bytes", 162 "tx_bytes", 163 "rx_errors", 164 "tx_errors", 165 "rx_dropped", 166 "tx_dropped", 167 "rx_mcast", 168 "tx_mcast", 169 "rx_bcast", 170 "tx_bcast", 171 "link_state_changes", 172 }; 173 174 /* statistics of host tx queue */ 175 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { 176 "packets", 177 "bytes", 178 "dropped", 179 "iq_busy", 180 "sgentry_sent", 181 182 "fw_instr_posted", 183 "fw_instr_processed", 184 "fw_instr_dropped", 185 "fw_bytes_sent", 186 187 "tso", 188 "vxlan", 189 "txq_restart", 190 }; 191 192 /* statistics of host rx queue */ 193 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { 194 "packets", 195 "bytes", 196 "dropped", 197 "dropped_nomem", 198 "dropped_toomany", 199 "fw_dropped", 200 "fw_pkts_received", 201 "fw_bytes_received", 202 "fw_dropped_nodispatch", 203 204 "vxlan", 205 "buffer_alloc_failure", 206 }; 207 208 /* LiquidIO driver private flags */ 209 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { 210 }; 211 212 #define OCTNIC_NCMD_AUTONEG_ON 0x1 213 #define OCTNIC_NCMD_PHY_ON 0x2 214 215 static int lio_get_link_ksettings(struct net_device *netdev, 216 struct ethtool_link_ksettings *ecmd) 217 { 218 struct lio *lio = GET_LIO(netdev); 219 struct octeon_device *oct = lio->oct_dev; 220 struct oct_link_info *linfo; 221 222 linfo = &lio->linfo; 223 224 ethtool_link_ksettings_zero_link_mode(ecmd, supported); 225 ethtool_link_ksettings_zero_link_mode(ecmd, advertising); 226 227 switch (linfo->link.s.phy_type) { 228 case LIO_PHY_PORT_TP: 229 ecmd->base.port = PORT_TP; 230 ecmd->base.autoneg = AUTONEG_DISABLE; 231 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); 232 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); 233 ethtool_link_ksettings_add_link_mode(ecmd, supported, 234 10000baseT_Full); 235 236 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); 237 ethtool_link_ksettings_add_link_mode(ecmd, advertising, 238 10000baseT_Full); 239 240 break; 241 242 case LIO_PHY_PORT_FIBRE: 243 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || 244 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || 245 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || 246 linfo->link.s.if_mode == INTERFACE_MODE_XFI) { 247 dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); 248 ecmd->base.transceiver = XCVR_EXTERNAL; 249 } else { 250 dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", 251 linfo->link.s.if_mode); 252 } 253 254 ecmd->base.port = PORT_FIBRE; 255 ecmd->base.autoneg = AUTONEG_DISABLE; 256 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); 257 258 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); 259 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); 260 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 261 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { 262 if (OCTEON_CN23XX_PF(oct)) { 263 ethtool_link_ksettings_add_link_mode 264 (ecmd, supported, 25000baseSR_Full); 265 ethtool_link_ksettings_add_link_mode 266 (ecmd, supported, 25000baseKR_Full); 267 ethtool_link_ksettings_add_link_mode 268 (ecmd, supported, 25000baseCR_Full); 269 
				if (oct->no_speed_setting == 0) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseCR_Full);
				}

				if (oct->no_speed_setting == 0) {
					liquidio_get_speed(lio);
					liquidio_get_fec(lio);
				} else {
					oct->speed_setting = 25;
				}

				if (oct->speed_setting == 10) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseCR_Full);
				}
				if (oct->speed_setting == 25) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseCR_Full);
				}

				if (oct->no_speed_setting)
					break;

				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, FEC_RS);
				ethtool_link_ksettings_add_link_mode
					(ecmd, supported, FEC_NONE);
				/*FEC_OFF*/
				if (oct->props[lio->ifidx].fec == 1) {
					/* ETHTOOL_FEC_RS */
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising, FEC_RS);
				} else {
					/* ETHTOOL_FEC_OFF */
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising, FEC_NONE);
				}
			} else { /* VF */
				if (linfo->link.s.speed == 10000) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 10000baseCR_Full);

					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 10000baseCR_Full);
				}

				if (linfo->link.s.speed == 25000) {
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, supported,
						 25000baseCR_Full);

					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseSR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseKR_Full);
					ethtool_link_ksettings_add_link_mode
						(ecmd, advertising,
						 25000baseCR_Full);
				}
			}
		} else {
			ethtool_link_ksettings_add_link_mode(ecmd, supported,
							     10000baseT_Full);
			ethtool_link_ksettings_add_link_mode(ecmd, advertising,
							     10000baseT_Full);
		}
		break;
	}

	if (linfo->link.s.link_up) {
		ecmd->base.speed = linfo->link.s.speed;
		ecmd->base.duplex = linfo->link.s.duplex;
	} else {
		ecmd->base.speed = SPEED_UNKNOWN;
		ecmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static int lio_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *ecmd)
{
	const int speed = ecmd->base.speed;
	struct lio *lio = GET_LIO(netdev);
	struct oct_link_info *linfo;
	struct octeon_device *oct;

	oct = lio->oct_dev;

	linfo = &lio->linfo;

	if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
	      oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
		return -EOPNOTSUPP;

	if (oct->no_speed_setting) {
		dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
			__func__);
		return -EOPNOTSUPP;
	}

	if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
	     ecmd->base.duplex != linfo->link.s.duplex) ||
	    ecmd->base.autoneg != AUTONEG_DISABLE ||
	    (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
	     ecmd->base.speed != SPEED_UNKNOWN))
		return -EOPNOTSUPP;

	if ((oct->speed_boot == speed / 1000) &&
	    oct->speed_boot == oct->speed_setting)
		return 0;

	liquidio_set_speed(lio, speed / 1000);

	dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
		oct->speed_setting);

	return 0;
}

static void
lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct lio *lio;
	struct octeon_device *oct;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio");
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}

static void
lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct octeon_device *oct;
	struct lio *lio;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio_vf");
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}

static int
lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
	nctrl.ncmd.s.param1 = num_queues;
	nctrl.ncmd.s.param2 = num_queues;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
			ret);
		return -1;
	}

	return 0;
}

static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
	u32 combined_count = 0, max_combined = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct)) {
		if (oct->sriov_info.sriov_enabled) {
			max_combined = lio->linfo.num_txpciq;
		} else {
			struct octeon_config *conf23_pf =
				CHIP_CONF(oct, cn23xx_pf);

			max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
		}
		combined_count = oct->num_iqs;
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
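
		/* A VF has no static chip config to consult here: the RPVF
		 * field of the first input queue's PKT_CONTROL CSR holds the
		 * ring count assigned to this VF.
		 */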
		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
		combined_count = oct->num_iqs;
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->max_combined = max_combined;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
	channel->combined_count = combined_count;
}

static int
lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
{
	struct msix_entry *msix_entries;
	int num_msix_irqs = 0;
	int i;

	if (!oct->msix_on)
		return 0;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	if (oct->msix_on) {
		if (OCTEON_CN23XX_PF(oct))
			num_msix_irqs = oct->num_msix_irqs - 1;
		else if (OCTEON_CN23XX_VF(oct))
			num_msix_irqs = oct->num_msix_irqs;

		msix_entries = (struct msix_entry *)oct->msix_entries;
		for (i = 0; i < num_msix_irqs; i++) {
			if (oct->ioq_vector[i].vector) {
				/* clear the affinity_cpumask */
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
				oct->ioq_vector[i].vector = 0;
			}
		}

		/* non-iov vector's argument is oct struct */
		if (OCTEON_CN23XX_PF(oct))
			free_irq(msix_entries[i].vector, oct);

		pci_disable_msix(oct->pci_dev);
		kfree(oct->msix_entries);
		oct->msix_entries = NULL;
	}

	kfree(oct->irq_name_storage);
	oct->irq_name_storage = NULL;

	if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
		dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
		return -1;
	}

	if (octeon_setup_interrupt(oct, num_ioqs)) {
		dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
		return -1;
	}

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return 0;
}

static int
lio_ethtool_set_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	u32 combined_count, max_combined;
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
		dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
		return -EINVAL;
	}

	if (!channel->combined_count || channel->other_count ||
	    channel->rx_count || channel->tx_count)
		return -EINVAL;

	combined_count = channel->combined_count;

	if (OCTEON_CN23XX_PF(oct)) {
		if (oct->sriov_info.sriov_enabled) {
			max_combined = lio->linfo.num_txpciq;
		} else {
			struct octeon_config *conf23_pf =
				CHIP_CONF(oct, cn23xx_pf);

			max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
		}
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
	} else {
		return -EINVAL;
	}

	if (combined_count > max_combined || combined_count < 1)
		return -EINVAL;

	if (combined_count == oct->num_iqs)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(dev)) {
		dev->netdev_ops->ndo_stop(dev);
		stopped = 1;
	}

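	/* Tear the queues down and recreate them with the new count; the
	 * interface is reopened below if we stopped it above.
	 */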
	if (lio_reset_queues(dev, combined_count))
		return -EINVAL;

	if (stopped)
		dev->netdev_ops->ndo_open(dev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;
}

static int lio_get_eeprom_len(struct net_device *netdev)
{
	u8 buf[192];
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		      board_info->name, board_info->serial_number,
		      board_info->major, board_info->minor);

	return len;
}

static int
lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;

	if (eeprom->offset)
		return -EINVAL;

	eeprom->magic = oct_dev->pci_dev->vendor;
	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	sprintf((char *)bytes,
		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
		board_info->name, board_info->serial_number,
		board_info->major, board_info->minor);

	return 0;
}

static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
	nctrl.ncmd.s.param1 = addr;
	nctrl.ncmd.s.param2 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Failed to configure gpio value, ret=%d\n", ret);
		return -EINVAL;
	}

	return 0;
}

static int octnet_id_active(struct net_device *netdev, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
	nctrl.ncmd.s.param1 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Failed to configure ID active state, ret=%d\n", ret);
		return -EINVAL;
	}

	return 0;
}

/* This routine provides PHY access routines for
 * MDIO clause 45.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp), 0);

	if (!sc)
		return -ENOMEM;

	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		octeon_free_soft_command(oct_dev, sc);
		return -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
		if (retval)
			return retval;

		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev,
				"octnet mdio45 access failed: %x\n", retval);
			WRITE_ONCE(sc->caller_is_done, true);
			return -EBUSY;
		}

		octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
				    sizeof(struct oct_mdio_cmd) / 8);

		if (!op)
			*value = mdio_cmd_rsp->resp.value1;

		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	int value, ret;
	u32 cur_ver;

	linfo = &lio->linfo;
	cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
			     oct->fw_info.ver.min,
			     oct->fw_info.ver.rev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &value);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
			if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
			    cur_ver > OCT_FW_VER(1, 7, 2))
				return 2;
			else
				return 0;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
		    cur_ver > OCT_FW_VER(1, 7, 2))
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
		else if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
		    linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
		    cur_ver > OCT_FW_VER(1, 7, 2))
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
		else if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);

			return 0;
		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering,
			  struct kernel_ethtool_ringparam *kernel_ering,
			  struct netlink_ext_ack *extack)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
		rx_pending = oct->droq[0]->max_count;
		tx_pending = oct->instr_queue[0]->max_count;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
	ering->rx_pending = rx_pending;
	ering->rx_max_pending = rx_max_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
}

static int lio_23xx_reconfigure_queue_count(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 resp_size, data_size;
	struct liquidio_if_cfg_resp *resp;
	struct octeon_soft_command *sc;
	union oct_nic_if_cfg if_cfg;
	struct lio_version *vdata;
	u32 ifidx_or_pfnum;
	int retval;
	int j;

	resp_size = sizeof(struct liquidio_if_cfg_resp);
	data_size = sizeof(struct lio_version);
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, data_size,
					  resp_size, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
			__func__);
		return -1;
	}

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	vdata = (struct lio_version *)sc->virtdptr;

	vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
	vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
	vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

	ifidx_or_pfnum = oct->pf_num;

	if_cfg.u64 = 0;
	if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
	if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
	if_cfg.s.base_queue = oct->sriov_info.pf_srn;
	if_cfg.s.gmx_port_id = oct->pf_num;

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_QCOUNT_UPDATE, 0,
				    if_cfg.u64, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Sending iq/oq config failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;

	retval = resp->status;
	if (retval) {
		dev_err(&oct->pci_dev->dev,
			"iq/oq config failed: %x\n", retval);
		WRITE_ONCE(sc->caller_is_done, true);
		return -1;
	}

	octeon_swap_8B_data((u64 *)(&resp->cfg_info),
			    (sizeof(struct liquidio_if_cfg_info)) >> 3);

	lio->ifidx = ifidx_or_pfnum;
	lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
	lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
	for (j = 0; j < lio->linfo.num_rxpciq; j++) {
		lio->linfo.rxpciq[j].u64 =
			resp->cfg_info.linfo.rxpciq[j].u64;
	}

	for (j = 0; j < lio->linfo.num_txpciq; j++) {
		lio->linfo.txpciq[j].u64 =
			resp->cfg_info.linfo.txpciq[j].u64;
	}

	lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
	lio->txq = lio->linfo.txpciq[0].s.q_no;
	lio->rxq = lio->linfo.rxpciq[0].s.q_no;

	dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
		 lio->linfo.num_rxpciq);

	WRITE_ONCE(sc->caller_is_done, true);

	return 0;
}

static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int i, queue_count_update = 0;
	struct napi_struct *napi, *n;
	int ret;

	schedule_timeout_uninterruptible(msecs_to_jiffies(100));

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	if (lio_wait_for_instr_fetch(oct))
		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
		return -1;
	}

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
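	 * The NAPI instances are deleted right after, before the queues
	 * themselves are torn down and recreated.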
	 */
	oct->fn_list.disable_io_queues(oct);
	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (num_qs != oct->num_iqs) {
		ret = netif_set_real_num_rx_queues(netdev, num_qs);
		if (ret) {
			dev_err(&oct->pci_dev->dev,
				"Setting real number rx failed\n");
			return ret;
		}

		ret = netif_set_real_num_tx_queues(netdev, num_qs);
		if (ret) {
			dev_err(&oct->pci_dev->dev,
				"Setting real number tx failed\n");
			return ret;
		}

		/* The value of queue_count_update decides whether it is the
		 * queue count or the descriptor count that is being
		 * re-configured.
		 */
		queue_count_update = 1;
	}

	/* Re-configuration of queues can happen in two scenarios, SRIOV
	 * enabled and SRIOV disabled. A few things, like recreating queue
	 * zero and resetting glists and IRQs, are required for both. For the
	 * latter, some more steps, like updating sriov_info for the octeon
	 * device, need to be done.
	 */
	if (queue_count_update) {
		cleanup_rx_oom_poll_fn(netdev);

		lio_delete_glists(lio);

		/* Delete mbox for a PF that is SRIOV disabled, because
		 * sriov_info will now change.
		 */
		if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
			oct->fn_list.free_mbox(oct);
	}

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_delete_droq(oct, i);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_delete_instr_queue(oct, i);
	}

	if (queue_count_update) {
		/* For PF re-configure sriov related information */
		if ((OCTEON_CN23XX_PF(oct)) &&
		    !oct->sriov_info.sriov_enabled) {
			oct->sriov_info.num_pf_rings = num_qs;
			if (cn23xx_sriov_config(oct)) {
				dev_err(&oct->pci_dev->dev,
					"Queue reset aborted: SRIOV config failed\n");
				return -1;
			}

			num_qs = oct->sriov_info.num_pf_rings;
		}
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
		return -1;
	}

	/* The following are needed in case of queue count re-configuration and
	 * not for descriptor count re-configuration.
	 */
	if (queue_count_update) {
		if (octeon_setup_instr_queues(oct))
			return -1;

		if (octeon_setup_output_queues(oct))
			return -1;

		/* Recreating mbox for PF that is SRIOV disabled */
		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
			if (oct->fn_list.setup_mbox(oct)) {
				dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
				return -1;
			}
		}

		/* Deleting and recreating IRQs whether the interface is SRIOV
		 * enabled or disabled.
		 */
		if (lio_irq_reallocate_irqs(oct, num_qs)) {
			dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
			return -1;
		}

		/* Enable the input and output queues for this Octeon device */
		if (oct->fn_list.enable_io_queues(oct)) {
			dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
			return -1;
		}

		for (i = 0; i < oct->num_oqs; i++)
			writel(oct->droq[i]->max_count,
			       oct->droq[i]->pkts_credit_reg);

		/* Inform the firmware of the new queue count. This is
		 * required for the firmware to allocate more queues than
		 * were configured at load time.
		 */
		if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
			if (lio_23xx_reconfigure_queue_count(lio))
				return -1;
		}
	}

	/* Once firmware is aware of the new value, queues can be recreated */
	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
		dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
		return -1;
	}

	if (queue_count_update) {
		if (lio_setup_glists(oct, lio, num_qs)) {
			dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
			return -1;
		}

		if (setup_rx_oom_poll_fn(netdev)) {
			dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
			return 1;
		}

		/* Send firmware the information about new number of queues
		 * if the interface is a VF or a PF that is SRIOV enabled.
		 */
		if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
			if (lio_send_queue_count_update(netdev, num_qs))
				return -1;
	}

	return 0;
}

static int
lio_ethtool_set_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering,
			  struct kernel_ethtool_ringparam *kernel_ering,
			  struct netlink_ext_ack *extack)
{
	u32 rx_count, tx_count, rx_count_old, tx_count_old;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
		return -EINVAL;

	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
		return -EINVAL;

	rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
			   CN23XX_MAX_OQ_DESCRIPTORS);
	tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
			   CN23XX_MAX_IQ_DESCRIPTORS);

	rx_count_old = oct->droq[0]->max_count;
	tx_count_old = oct->instr_queue[0]->max_count;

	if (rx_count == rx_count_old && tx_count == tx_count_old)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(netdev)) {
		netdev->netdev_ops->ndo_stop(netdev);
		stopped = 1;
	}

	/* Change RX/TX DESCS count */
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count);

	if (lio_reset_queues(netdev, oct->num_iqs))
		goto err_lio_reset_queues;

	if (stopped)
		netdev->netdev_ops->ndo_open(netdev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;

err_lio_reset_queues:
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count_old);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count_old);
	return -EINVAL;
}

static u32 lio_get_msglevel(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->msg_enable;
}

static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
		if (msglvl & NETIF_MSG_HW)
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);
		else
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_DISABLE, 0);
	}

	lio->msg_enable = msglvl;
}

static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	lio->msg_enable = msglvl;
}

static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers. Just report pause frame support.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	pause->autoneg = 0;

	pause->tx_pause = oct->tx_pause;
	pause->rx_pause = oct->rx_pause;
}

static int
lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct oct_link_info *linfo = &lio->linfo;

	int ret = 0;

	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
		return -EINVAL;

	if (linfo->link.s.duplex == 0) {
		/* no flow control for half duplex */
		if (pause->rx_pause || pause->tx_pause)
			return -EINVAL;
	}

	/* do not support autoneg of link flow control */
	if (pause->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	if (pause->rx_pause) {
		/* enable rx pause */
		nctrl.ncmd.s.param1 = 1;
	} else {
		/* disable rx pause */
		nctrl.ncmd.s.param1 = 0;
	}

	if (pause->tx_pause) {
		/* enable tx pause */
		nctrl.ncmd.s.param2 = 1;
	} else {
		/* disable tx pause */
		nctrl.ncmd.s.param2 = 0;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Failed to set pause parameter, ret=%d\n", ret);
		return -EINVAL;
	}

	oct->rx_pause = pause->rx_pause;
	oct->tx_pause = pause->tx_pause;

	return 0;
}

static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct rtnl_link_stats64 lstats;
	int i = 0, j;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = lstats.rx_packets;
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = lstats.tx_packets;
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = lstats.rx_bytes;
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = lstats.tx_bytes;
	data[i++] = lstats.rx_errors +
		    oct_dev->link_stats.fromwire.fcs_err +
		    oct_dev->link_stats.fromwire.jabber_err +
		    oct_dev->link_stats.fromwire.l2_err +
		    oct_dev->link_stats.fromwire.frame_err;
	data[i++] = lstats.tx_errors;
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = lstats.rx_dropped +
		    oct_dev->link_stats.fromwire.fifo_err +
		    oct_dev->link_stats.fromwire.dmac_drop +
		    oct_dev->link_stats.fromwire.red_drops +
		    oct_dev->link_stats.fromwire.fw_err_pko +
		    oct_dev->link_stats.fromwire.fw_err_link +
		    oct_dev->link_stats.fromwire.fw_err_drop;
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = lstats.tx_dropped +
		    oct_dev->link_stats.fromhost.max_collision_fail +
		    oct_dev->link_stats.fromhost.max_deferral_fail +
		    oct_dev->link_stats.fromhost.total_collisions +
		    oct_dev->link_stats.fromhost.fw_err_pko +
		    oct_dev->link_stats.fromhost.fw_err_link +
		    oct_dev->link_stats.fromhost.fw_err_drop +
		    oct_dev->link_stats.fromhost.fw_err_pki;

	/* firmware tx stats */
	/* per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 * fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/* per_core_stats[i].link_stats[port].fromhost.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* Multicast packets sent by this port */
	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;

	/* mac tx statistics */
	/* CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/* CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/* CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/* CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/* CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/* CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/* Multicast packets received on this port */
	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/* per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO */
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/* per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/* per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_timer
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/* intrmod: packet forward rate */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/* CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/* CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/* CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
			continue;
		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions could not be processed */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/* tso request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restart */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
			continue;

		/* packets sent to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct rtnl_link_stats64 lstats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = lstats.rx_packets;
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = lstats.tx_packets;
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = lstats.rx_bytes;
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = lstats.tx_bytes;
	data[i++] = lstats.rx_errors;
	data[i++] = lstats.tx_errors;
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = lstats.rx_dropped;
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = lstats.tx_dropped +
		    oct_dev->link_stats.fromhost.fw_err_drop;

	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;

	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions could not be processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.bytes_sent);
		/* tso request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restart */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets sent to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] = CVM_CAST64(
			oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] = CVM_CAST64(
			oct_dev->droq[j]->stats.rx_bytes_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	int i;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
			sprintf(data, "%s", oct_priv_flags_strings[i]);
			data += ETH_GSTRING_LEN;
		}
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		break;
	}
}

static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
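			/* each stat name occupies one fixed-width
			 * ETH_GSTRING_LEN slot in the output buffer
			 */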
sprintf(data, "%s", oct_stats_strings[j]); 1864 data += ETH_GSTRING_LEN; 1865 } 1866 1867 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 1868 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { 1869 if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) 1870 continue; 1871 for (j = 0; j < num_iq_stats; j++) { 1872 sprintf(data, "tx-%d-%s", i, 1873 oct_iq_stats_strings[j]); 1874 data += ETH_GSTRING_LEN; 1875 } 1876 } 1877 1878 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 1879 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { 1880 if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) 1881 continue; 1882 for (j = 0; j < num_oq_stats; j++) { 1883 sprintf(data, "rx-%d-%s", i, 1884 oct_droq_stats_strings[j]); 1885 data += ETH_GSTRING_LEN; 1886 } 1887 } 1888 break; 1889 1890 case ETH_SS_PRIV_FLAGS: 1891 lio_get_priv_flags_strings(lio, data); 1892 break; 1893 default: 1894 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); 1895 break; 1896 } 1897 } 1898 1899 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset, 1900 u8 *data) 1901 { 1902 int num_iq_stats, num_oq_stats, i, j; 1903 struct lio *lio = GET_LIO(netdev); 1904 struct octeon_device *oct_dev = lio->oct_dev; 1905 int num_stats; 1906 1907 switch (stringset) { 1908 case ETH_SS_STATS: 1909 num_stats = ARRAY_SIZE(oct_vf_stats_strings); 1910 for (j = 0; j < num_stats; j++) { 1911 sprintf(data, "%s", oct_vf_stats_strings[j]); 1912 data += ETH_GSTRING_LEN; 1913 } 1914 1915 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 1916 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { 1917 if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) 1918 continue; 1919 for (j = 0; j < num_iq_stats; j++) { 1920 sprintf(data, "tx-%d-%s", i, 1921 oct_iq_stats_strings[j]); 1922 data += ETH_GSTRING_LEN; 1923 } 1924 } 1925 1926 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 1927 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { 1928 if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) 1929 continue; 1930 for (j = 0; j < num_oq_stats; j++) { 1931 sprintf(data, "rx-%d-%s", i, 1932 oct_droq_stats_strings[j]); 1933 data += ETH_GSTRING_LEN; 1934 } 1935 } 1936 break; 1937 1938 case ETH_SS_PRIV_FLAGS: 1939 lio_get_priv_flags_strings(lio, data); 1940 break; 1941 default: 1942 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); 1943 break; 1944 } 1945 } 1946 1947 static int lio_get_priv_flags_ss_count(struct lio *lio) 1948 { 1949 struct octeon_device *oct_dev = lio->oct_dev; 1950 1951 switch (oct_dev->chip_id) { 1952 case OCTEON_CN23XX_PF_VID: 1953 case OCTEON_CN23XX_VF_VID: 1954 return ARRAY_SIZE(oct_priv_flags_strings); 1955 case OCTEON_CN68XX: 1956 case OCTEON_CN66XX: 1957 return -EOPNOTSUPP; 1958 default: 1959 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 1960 return -EOPNOTSUPP; 1961 } 1962 } 1963 1964 static int lio_get_sset_count(struct net_device *netdev, int sset) 1965 { 1966 struct lio *lio = GET_LIO(netdev); 1967 struct octeon_device *oct_dev = lio->oct_dev; 1968 1969 switch (sset) { 1970 case ETH_SS_STATS: 1971 return (ARRAY_SIZE(oct_stats_strings) + 1972 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + 1973 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); 1974 case ETH_SS_PRIV_FLAGS: 1975 return lio_get_priv_flags_ss_count(lio); 1976 default: 1977 return -EOPNOTSUPP; 1978 } 1979 } 1980 1981 static int lio_vf_get_sset_count(struct net_device *netdev, int sset) 1982 { 1983 struct lio *lio = GET_LIO(netdev); 1984 struct octeon_device *oct_dev = lio->oct_dev; 1985 1986 switch (sset) { 1987 case ETH_SS_STATS: 1988 return 
(ARRAY_SIZE(oct_vf_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

/* get interrupt moderation parameters */
static int octnet_get_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_resp *resp;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_intrmod_resp), 0);

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_intrmod_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_intrmod_resp));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed out.
	 */
	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
	if (retval)
		return -ENODEV;

	if (resp->status) {
		dev_err(&oct_dev->pci_dev->dev,
			"Get interrupt moderation parameters failed\n");
		WRITE_ONCE(sc->caller_is_done, true);
		return -ENODEV;
	}

	octeon_swap_8B_data((u64 *)&resp->intrmod,
			    (sizeof(struct oct_intrmod_cfg)) / 8);
	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
	WRITE_ONCE(sc->caller_is_done, true);

	return 0;
}

/* Configure interrupt moderation parameters */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  16, 0);

	if (!sc)
		return -ENOMEM;

	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed out.
	 */
	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
	if (retval)
		return retval;

	retval = sc->sc_status;
	if (retval == 0) {
		dev_info(&oct_dev->pci_dev->dev,
			 "Rx-Adaptive Interrupt moderation %s\n",
			 (intr_cfg->rx_enable) ?
2104 "enabled" : "disabled"); 2105 WRITE_ONCE(sc->caller_is_done, true); 2106 return 0; 2107 } 2108 2109 dev_err(&oct_dev->pci_dev->dev, 2110 "intrmod config failed. Status: %x\n", retval); 2111 WRITE_ONCE(sc->caller_is_done, true); 2112 return -ENODEV; 2113 } 2114 2115 static int lio_get_intr_coalesce(struct net_device *netdev, 2116 struct ethtool_coalesce *intr_coal, 2117 struct kernel_ethtool_coalesce *kernel_coal, 2118 struct netlink_ext_ack *extack) 2119 { 2120 struct lio *lio = GET_LIO(netdev); 2121 struct octeon_device *oct = lio->oct_dev; 2122 struct octeon_instr_queue *iq; 2123 struct oct_intrmod_cfg intrmod_cfg; 2124 2125 if (octnet_get_intrmod_cfg(lio, &intrmod_cfg)) 2126 return -ENODEV; 2127 2128 switch (oct->chip_id) { 2129 case OCTEON_CN23XX_PF_VID: 2130 case OCTEON_CN23XX_VF_VID: { 2131 if (!intrmod_cfg.rx_enable) { 2132 intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs; 2133 intr_coal->rx_max_coalesced_frames = 2134 oct->rx_max_coalesced_frames; 2135 } 2136 if (!intrmod_cfg.tx_enable) 2137 intr_coal->tx_max_coalesced_frames = 2138 oct->tx_max_coalesced_frames; 2139 break; 2140 } 2141 case OCTEON_CN68XX: 2142 case OCTEON_CN66XX: { 2143 struct octeon_cn6xxx *cn6xxx = 2144 (struct octeon_cn6xxx *)oct->chip; 2145 2146 if (!intrmod_cfg.rx_enable) { 2147 intr_coal->rx_coalesce_usecs = 2148 CFG_GET_OQ_INTR_TIME(cn6xxx->conf); 2149 intr_coal->rx_max_coalesced_frames = 2150 CFG_GET_OQ_INTR_PKT(cn6xxx->conf); 2151 } 2152 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; 2153 intr_coal->tx_max_coalesced_frames = iq->fill_threshold; 2154 break; 2155 } 2156 default: 2157 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 2158 return -EINVAL; 2159 } 2160 if (intrmod_cfg.rx_enable) { 2161 intr_coal->use_adaptive_rx_coalesce = 2162 intrmod_cfg.rx_enable; 2163 intr_coal->rate_sample_interval = 2164 intrmod_cfg.check_intrvl; 2165 intr_coal->pkt_rate_high = 2166 intrmod_cfg.maxpkt_ratethr; 2167 intr_coal->pkt_rate_low = 2168 intrmod_cfg.minpkt_ratethr; 2169 intr_coal->rx_max_coalesced_frames_high = 2170 intrmod_cfg.rx_maxcnt_trigger; 2171 intr_coal->rx_coalesce_usecs_high = 2172 intrmod_cfg.rx_maxtmr_trigger; 2173 intr_coal->rx_coalesce_usecs_low = 2174 intrmod_cfg.rx_mintmr_trigger; 2175 intr_coal->rx_max_coalesced_frames_low = 2176 intrmod_cfg.rx_mincnt_trigger; 2177 } 2178 if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && 2179 (intrmod_cfg.tx_enable)) { 2180 intr_coal->use_adaptive_tx_coalesce = 2181 intrmod_cfg.tx_enable; 2182 intr_coal->tx_max_coalesced_frames_high = 2183 intrmod_cfg.tx_maxcnt_trigger; 2184 intr_coal->tx_max_coalesced_frames_low = 2185 intrmod_cfg.tx_mincnt_trigger; 2186 } 2187 return 0; 2188 } 2189 2190 /* Enable/Disable auto interrupt Moderation */ 2191 static int oct_cfg_adaptive_intr(struct lio *lio, 2192 struct oct_intrmod_cfg *intrmod_cfg, 2193 struct ethtool_coalesce *intr_coal) 2194 { 2195 int ret = 0; 2196 2197 if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) { 2198 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; 2199 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; 2200 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; 2201 } 2202 if (intrmod_cfg->rx_enable) { 2203 intrmod_cfg->rx_maxcnt_trigger = 2204 intr_coal->rx_max_coalesced_frames_high; 2205 intrmod_cfg->rx_maxtmr_trigger = 2206 intr_coal->rx_coalesce_usecs_high; 2207 intrmod_cfg->rx_mintmr_trigger = 2208 intr_coal->rx_coalesce_usecs_low; 2209 intrmod_cfg->rx_mincnt_trigger = 2210 intr_coal->rx_max_coalesced_frames_low; 2211 } 2212 if (intrmod_cfg->tx_enable) 
{ 2213 intrmod_cfg->tx_maxcnt_trigger = 2214 intr_coal->tx_max_coalesced_frames_high; 2215 intrmod_cfg->tx_mincnt_trigger = 2216 intr_coal->tx_max_coalesced_frames_low; 2217 } 2218 2219 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); 2220 2221 return ret; 2222 } 2223 2224 static int 2225 oct_cfg_rx_intrcnt(struct lio *lio, 2226 struct oct_intrmod_cfg *intrmod, 2227 struct ethtool_coalesce *intr_coal) 2228 { 2229 struct octeon_device *oct = lio->oct_dev; 2230 u32 rx_max_coalesced_frames; 2231 2232 /* Config Cnt based interrupt values */ 2233 switch (oct->chip_id) { 2234 case OCTEON_CN68XX: 2235 case OCTEON_CN66XX: { 2236 struct octeon_cn6xxx *cn6xxx = 2237 (struct octeon_cn6xxx *)oct->chip; 2238 2239 if (!intr_coal->rx_max_coalesced_frames) 2240 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; 2241 else 2242 rx_max_coalesced_frames = 2243 intr_coal->rx_max_coalesced_frames; 2244 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, 2245 rx_max_coalesced_frames); 2246 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); 2247 break; 2248 } 2249 case OCTEON_CN23XX_PF_VID: { 2250 int q_no; 2251 2252 if (!intr_coal->rx_max_coalesced_frames) 2253 rx_max_coalesced_frames = intrmod->rx_frames; 2254 else 2255 rx_max_coalesced_frames = 2256 intr_coal->rx_max_coalesced_frames; 2257 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2258 q_no += oct->sriov_info.pf_srn; 2259 octeon_write_csr64( 2260 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 2261 (octeon_read_csr64( 2262 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & 2263 (0x3fffff00000000UL)) | 2264 (rx_max_coalesced_frames - 1)); 2265 /*consider setting resend bit*/ 2266 } 2267 intrmod->rx_frames = rx_max_coalesced_frames; 2268 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 2269 break; 2270 } 2271 case OCTEON_CN23XX_VF_VID: { 2272 int q_no; 2273 2274 if (!intr_coal->rx_max_coalesced_frames) 2275 rx_max_coalesced_frames = intrmod->rx_frames; 2276 else 2277 rx_max_coalesced_frames = 2278 intr_coal->rx_max_coalesced_frames; 2279 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2280 octeon_write_csr64( 2281 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 2282 (octeon_read_csr64( 2283 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & 2284 (0x3fffff00000000UL)) | 2285 (rx_max_coalesced_frames - 1)); 2286 /*consider writing to resend bit here*/ 2287 } 2288 intrmod->rx_frames = rx_max_coalesced_frames; 2289 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 2290 break; 2291 } 2292 default: 2293 return -EINVAL; 2294 } 2295 return 0; 2296 } 2297 2298 static int oct_cfg_rx_intrtime(struct lio *lio, 2299 struct oct_intrmod_cfg *intrmod, 2300 struct ethtool_coalesce *intr_coal) 2301 { 2302 struct octeon_device *oct = lio->oct_dev; 2303 u32 time_threshold, rx_coalesce_usecs; 2304 2305 /* Config Time based interrupt values */ 2306 switch (oct->chip_id) { 2307 case OCTEON_CN68XX: 2308 case OCTEON_CN66XX: { 2309 struct octeon_cn6xxx *cn6xxx = 2310 (struct octeon_cn6xxx *)oct->chip; 2311 if (!intr_coal->rx_coalesce_usecs) 2312 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; 2313 else 2314 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2315 2316 time_threshold = lio_cn6xxx_get_oq_ticks(oct, 2317 rx_coalesce_usecs); 2318 octeon_write_csr(oct, 2319 CN6XXX_SLI_OQ_INT_LEVEL_TIME, 2320 time_threshold); 2321 2322 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); 2323 break; 2324 } 2325 case OCTEON_CN23XX_PF_VID: { 2326 u64 time_threshold; 2327 int q_no; 2328 2329 if (!intr_coal->rx_coalesce_usecs) 2330 rx_coalesce_usecs = intrmod->rx_usecs; 2331 else 2332 rx_coalesce_usecs = 
intr_coal->rx_coalesce_usecs;
		time_threshold =
		    cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			q_no += oct->sriov_info.pf_srn;
			octeon_write_csr64(oct,
					   CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
					   (intrmod->rx_frames |
					    ((u64)time_threshold << 32)));
			/*consider writing to resend bit here*/
		}
		intrmod->rx_usecs = rx_coalesce_usecs;
		oct->rx_coalesce_usecs = rx_coalesce_usecs;
		break;
	}
	case OCTEON_CN23XX_VF_VID: {
		u64 time_threshold;
		int q_no;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = intrmod->rx_usecs;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

		time_threshold =
		    cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
			    (intrmod->rx_frames |
			     ((u64)time_threshold << 32)));
			/*consider setting resend bit*/
		}
		intrmod->rx_usecs = rx_coalesce_usecs;
		oct->rx_coalesce_usecs = rx_coalesce_usecs;
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static int
oct_cfg_tx_intrcnt(struct lio *lio,
		   struct oct_intrmod_cfg *intrmod,
		   struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 iq_intr_pkt;
	void __iomem *inst_cnt_reg;
	u64 val;

	/* Config Cnt based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	case OCTEON_CN23XX_VF_VID:
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		if (!intr_coal->tx_max_coalesced_frames)
			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		else
			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
			val = readq(inst_cnt_reg);
			/* clear wmark and count; don't want to write the
			 * count back
			 */
			val = (val & 0xFFFF000000000000ULL) |
			      ((u64)(iq_intr_pkt - 1)
			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
			writeq(val, inst_cnt_reg);
			/*consider setting resend bit*/
		}
		intrmod->tx_frames = iq_intr_pkt;
		oct->tx_max_coalesced_frames = iq_intr_pkt;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static int lio_set_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct lio *lio = GET_LIO(netdev);
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_intrmod_cfg intrmod = {0};
	u32 j, q_no;
	int db_max, db_min;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		db_min = CN6XXX_DB_MIN;
		db_max = CN6XXX_DB_MAX;
		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
			for (j = 0; j < lio->linfo.num_txpciq; j++) {
				q_no = lio->linfo.txpciq[j].s.q_no;
				oct->instr_queue[q_no]->fill_threshold =
					intr_coal->tx_max_coalesced_frames;
			}
		} else {
			dev_err(&oct->pci_dev->dev,
				"LIQUIDIO: Invalid tx-frames:%d. 
Range is min:%d max:%d\n", 2448 intr_coal->tx_max_coalesced_frames, 2449 db_min, db_max); 2450 return -EINVAL; 2451 } 2452 break; 2453 case OCTEON_CN23XX_PF_VID: 2454 case OCTEON_CN23XX_VF_VID: 2455 break; 2456 default: 2457 return -EINVAL; 2458 } 2459 2460 intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; 2461 intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0; 2462 intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2463 intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 2464 intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2465 2466 ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal); 2467 2468 if (!intr_coal->use_adaptive_rx_coalesce) { 2469 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal); 2470 if (ret) 2471 goto ret_intrmod; 2472 2473 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal); 2474 if (ret) 2475 goto ret_intrmod; 2476 } else { 2477 oct->rx_coalesce_usecs = 2478 CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 2479 oct->rx_max_coalesced_frames = 2480 CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2481 } 2482 2483 if (!intr_coal->use_adaptive_tx_coalesce) { 2484 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal); 2485 if (ret) 2486 goto ret_intrmod; 2487 } else { 2488 oct->tx_max_coalesced_frames = 2489 CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2490 } 2491 2492 return 0; 2493 ret_intrmod: 2494 return ret; 2495 } 2496 2497 static int lio_get_ts_info(struct net_device *netdev, 2498 struct ethtool_ts_info *info) 2499 { 2500 struct lio *lio = GET_LIO(netdev); 2501 2502 info->so_timestamping = 2503 #ifdef PTP_HARDWARE_TIMESTAMPING 2504 SOF_TIMESTAMPING_TX_HARDWARE | 2505 SOF_TIMESTAMPING_RX_HARDWARE | 2506 SOF_TIMESTAMPING_RAW_HARDWARE | 2507 SOF_TIMESTAMPING_TX_SOFTWARE | 2508 #endif 2509 SOF_TIMESTAMPING_RX_SOFTWARE | 2510 SOF_TIMESTAMPING_SOFTWARE; 2511 2512 if (lio->ptp_clock) 2513 info->phc_index = ptp_clock_index(lio->ptp_clock); 2514 else 2515 info->phc_index = -1; 2516 2517 #ifdef PTP_HARDWARE_TIMESTAMPING 2518 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 2519 2520 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2521 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 2522 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 2523 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 2524 #endif 2525 2526 return 0; 2527 } 2528 2529 /* Return register dump len. 
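The length reported here is what ethtool uses to size the buffer passed to lio_get_regs(), and it depends on the chip type. 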
*/ 2530 static int lio_get_regs_len(struct net_device *dev) 2531 { 2532 struct lio *lio = GET_LIO(dev); 2533 struct octeon_device *oct = lio->oct_dev; 2534 2535 switch (oct->chip_id) { 2536 case OCTEON_CN23XX_PF_VID: 2537 return OCT_ETHTOOL_REGDUMP_LEN_23XX; 2538 case OCTEON_CN23XX_VF_VID: 2539 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF; 2540 default: 2541 return OCT_ETHTOOL_REGDUMP_LEN; 2542 } 2543 } 2544 2545 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct) 2546 { 2547 u32 reg; 2548 u8 pf_num = oct->pf_num; 2549 int len = 0; 2550 int i; 2551 2552 /* PCI Window Registers */ 2553 2554 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2555 2556 /*0x29030 or 0x29040*/ 2557 reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num); 2558 len += sprintf(s + len, 2559 "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n", 2560 reg, oct->pcie_port, oct->pf_num, 2561 (u64)octeon_read_csr64(oct, reg)); 2562 2563 /*0x27080 or 0x27090*/ 2564 reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); 2565 len += 2566 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n", 2567 reg, oct->pcie_port, oct->pf_num, 2568 (u64)octeon_read_csr64(oct, reg)); 2569 2570 /*0x27000 or 0x27010*/ 2571 reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); 2572 len += 2573 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n", 2574 reg, oct->pcie_port, oct->pf_num, 2575 (u64)octeon_read_csr64(oct, reg)); 2576 2577 /*0x29120*/ 2578 reg = 0x29120; 2579 len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg, 2580 (u64)octeon_read_csr64(oct, reg)); 2581 2582 /*0x27300*/ 2583 reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2584 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2585 len += sprintf( 2586 s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg, 2587 oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg)); 2588 2589 /*0x27200*/ 2590 reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2591 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2592 len += sprintf(s + len, 2593 "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n", 2594 reg, oct->pcie_port, oct->pf_num, 2595 (u64)octeon_read_csr64(oct, reg)); 2596 2597 /*29130*/ 2598 reg = CN23XX_SLI_PKT_CNT_INT; 2599 len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg, 2600 (u64)octeon_read_csr64(oct, reg)); 2601 2602 /*0x29140*/ 2603 reg = CN23XX_SLI_PKT_TIME_INT; 2604 len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg, 2605 (u64)octeon_read_csr64(oct, reg)); 2606 2607 /*0x29160*/ 2608 reg = 0x29160; 2609 len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg, 2610 (u64)octeon_read_csr64(oct, reg)); 2611 2612 /*0x29180*/ 2613 reg = CN23XX_SLI_OQ_WMARK; 2614 len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n", 2615 reg, (u64)octeon_read_csr64(oct, reg)); 2616 2617 /*0x291E0*/ 2618 reg = CN23XX_SLI_PKT_IOQ_RING_RST; 2619 len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg, 2620 (u64)octeon_read_csr64(oct, reg)); 2621 2622 /*0x29210*/ 2623 reg = CN23XX_SLI_GBL_CONTROL; 2624 len += sprintf(s + len, 2625 "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg, 2626 (u64)octeon_read_csr64(oct, reg)); 2627 2628 /*0x29220*/ 2629 reg = 0x29220; 2630 len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n", 2631 reg, (u64)octeon_read_csr64(oct, reg)); 2632 2633 /*PF only*/ 2634 if (pf_num == 0) { 2635 /*0x29260*/ 2636 reg = CN23XX_SLI_OUT_BP_EN_W1S; 2637 len += sprintf(s + len, 2638 "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): 
%016llx\n", 2639 reg, (u64)octeon_read_csr64(oct, reg)); 2640 } else if (pf_num == 1) { 2641 /*0x29270*/ 2642 reg = CN23XX_SLI_OUT_BP_EN2_W1S; 2643 len += sprintf(s + len, 2644 "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n", 2645 reg, (u64)octeon_read_csr64(oct, reg)); 2646 } 2647 2648 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2649 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i); 2650 len += 2651 sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2652 reg, i, (u64)octeon_read_csr64(oct, reg)); 2653 } 2654 2655 /*0x10040*/ 2656 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2657 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2658 len += sprintf(s + len, 2659 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2660 reg, i, (u64)octeon_read_csr64(oct, reg)); 2661 } 2662 2663 /*0x10080*/ 2664 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2665 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i); 2666 len += sprintf(s + len, 2667 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2668 reg, i, (u64)octeon_read_csr64(oct, reg)); 2669 } 2670 2671 /*0x10090*/ 2672 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2673 reg = CN23XX_SLI_OQ_SIZE(i); 2674 len += sprintf( 2675 s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2676 reg, i, (u64)octeon_read_csr64(oct, reg)); 2677 } 2678 2679 /*0x10050*/ 2680 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2681 reg = CN23XX_SLI_OQ_PKT_CONTROL(i); 2682 len += sprintf( 2683 s + len, 2684 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2685 reg, i, (u64)octeon_read_csr64(oct, reg)); 2686 } 2687 2688 /*0x10070*/ 2689 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2690 reg = CN23XX_SLI_OQ_BASE_ADDR64(i); 2691 len += sprintf(s + len, 2692 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2693 reg, i, (u64)octeon_read_csr64(oct, reg)); 2694 } 2695 2696 /*0x100a0*/ 2697 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2698 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i); 2699 len += sprintf(s + len, 2700 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2701 reg, i, (u64)octeon_read_csr64(oct, reg)); 2702 } 2703 2704 /*0x100b0*/ 2705 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2706 reg = CN23XX_SLI_OQ_PKTS_SENT(i); 2707 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2708 reg, i, (u64)octeon_read_csr64(oct, reg)); 2709 } 2710 2711 /*0x100c0*/ 2712 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2713 reg = 0x100c0 + i * CN23XX_OQ_OFFSET; 2714 len += sprintf(s + len, 2715 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2716 reg, i, (u64)octeon_read_csr64(oct, reg)); 2717 2718 /*0x10000*/ 2719 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2720 reg = CN23XX_SLI_IQ_PKT_CONTROL64(i); 2721 len += sprintf( 2722 s + len, 2723 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2724 reg, i, (u64)octeon_read_csr64(oct, reg)); 2725 } 2726 2727 /*0x10010*/ 2728 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2729 reg = CN23XX_SLI_IQ_BASE_ADDR64(i); 2730 len += sprintf( 2731 s + len, 2732 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg, 2733 i, (u64)octeon_read_csr64(oct, reg)); 2734 } 2735 2736 /*0x10020*/ 2737 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2738 reg = CN23XX_SLI_IQ_DOORBELL(i); 2739 len += sprintf( 2740 s + len, 2741 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2742 reg, i, (u64)octeon_read_csr64(oct, reg)); 2743 } 2744 2745 /*0x10030*/ 2746 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2747 reg = CN23XX_SLI_IQ_SIZE(i); 2748 len += sprintf( 2749 s + len, 2750 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2751 reg, i, 
(u64)octeon_read_csr64(oct, reg)); 2752 } 2753 2754 /*0x10040*/ 2755 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) 2756 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2757 len += sprintf(s + len, 2758 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2759 reg, i, (u64)octeon_read_csr64(oct, reg)); 2760 } 2761 2762 return len; 2763 } 2764 2765 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct) 2766 { 2767 int len = 0; 2768 u32 reg; 2769 int i; 2770 2771 /* PCI Window Registers */ 2772 2773 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2774 2775 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2776 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i); 2777 len += sprintf(s + len, 2778 "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2779 reg, i, (u64)octeon_read_csr64(oct, reg)); 2780 } 2781 2782 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2783 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2784 len += sprintf(s + len, 2785 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2786 reg, i, (u64)octeon_read_csr64(oct, reg)); 2787 } 2788 2789 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2790 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i); 2791 len += sprintf(s + len, 2792 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2793 reg, i, (u64)octeon_read_csr64(oct, reg)); 2794 } 2795 2796 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2797 reg = CN23XX_VF_SLI_OQ_SIZE(i); 2798 len += sprintf(s + len, 2799 "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2800 reg, i, (u64)octeon_read_csr64(oct, reg)); 2801 } 2802 2803 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2804 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i); 2805 len += sprintf(s + len, 2806 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2807 reg, i, (u64)octeon_read_csr64(oct, reg)); 2808 } 2809 2810 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2811 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i); 2812 len += sprintf(s + len, 2813 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2814 reg, i, (u64)octeon_read_csr64(oct, reg)); 2815 } 2816 2817 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2818 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i); 2819 len += sprintf(s + len, 2820 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2821 reg, i, (u64)octeon_read_csr64(oct, reg)); 2822 } 2823 2824 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2825 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i); 2826 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2827 reg, i, (u64)octeon_read_csr64(oct, reg)); 2828 } 2829 2830 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2831 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET; 2832 len += sprintf(s + len, 2833 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2834 reg, i, (u64)octeon_read_csr64(oct, reg)); 2835 } 2836 2837 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2838 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET; 2839 len += sprintf(s + len, 2840 "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n", 2841 reg, i, (u64)octeon_read_csr64(oct, reg)); 2842 } 2843 2844 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2845 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i); 2846 len += sprintf(s + len, 2847 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2848 reg, i, (u64)octeon_read_csr64(oct, reg)); 2849 } 2850 2851 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2852 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i); 2853 len += sprintf(s + len, 2854 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", 2855 reg, i, (u64)octeon_read_csr64(oct, reg)); 2856 } 2857 2858 for (i = 0; i < 
(oct->sriov_info.rings_per_vf); i++) { 2859 reg = CN23XX_VF_SLI_IQ_DOORBELL(i); 2860 len += sprintf(s + len, 2861 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2862 reg, i, (u64)octeon_read_csr64(oct, reg)); 2863 } 2864 2865 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2866 reg = CN23XX_VF_SLI_IQ_SIZE(i); 2867 len += sprintf(s + len, 2868 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2869 reg, i, (u64)octeon_read_csr64(oct, reg)); 2870 } 2871 2872 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2873 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2874 len += sprintf(s + len, 2875 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2876 reg, i, (u64)octeon_read_csr64(oct, reg)); 2877 } 2878 2879 return len; 2880 } 2881 2882 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) 2883 { 2884 u32 reg; 2885 int i, len = 0; 2886 2887 /* PCI Window Registers */ 2888 2889 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2890 reg = CN6XXX_WIN_WR_ADDR_LO; 2891 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n", 2892 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg)); 2893 reg = CN6XXX_WIN_WR_ADDR_HI; 2894 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n", 2895 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg)); 2896 reg = CN6XXX_WIN_RD_ADDR_LO; 2897 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n", 2898 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg)); 2899 reg = CN6XXX_WIN_RD_ADDR_HI; 2900 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n", 2901 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg)); 2902 reg = CN6XXX_WIN_WR_DATA_LO; 2903 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n", 2904 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg)); 2905 reg = CN6XXX_WIN_WR_DATA_HI; 2906 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n", 2907 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg)); 2908 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n", 2909 CN6XXX_WIN_WR_MASK_REG, 2910 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG)); 2911 2912 /* PCI Interrupt Register */ 2913 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n", 2914 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct, 2915 CN6XXX_SLI_INT_ENB64_PORT0)); 2916 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n", 2917 CN6XXX_SLI_INT_ENB64_PORT1, 2918 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1)); 2919 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64, 2920 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64)); 2921 2922 /* PCI Output queue registers */ 2923 for (i = 0; i < oct->num_oqs; i++) { 2924 reg = CN6XXX_SLI_OQ_PKTS_SENT(i); 2925 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n", 2926 reg, i, octeon_read_csr(oct, reg)); 2927 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i); 2928 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n", 2929 reg, i, octeon_read_csr(oct, reg)); 2930 } 2931 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS; 2932 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n", 2933 reg, octeon_read_csr(oct, reg)); 2934 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME; 2935 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n", 2936 reg, octeon_read_csr(oct, reg)); 2937 2938 /* PCI Input queue registers */ 2939 for (i = 0; i <= 3; i++) { 2940 u32 reg; 2941 2942 reg = CN6XXX_SLI_IQ_DOORBELL(i); 2943 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n", 2944 reg, i, octeon_read_csr(oct, reg)); 2945 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i); 2946 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n", 2947 reg, i, 
octeon_read_csr(oct, reg));
	}

	/* PCI DMA registers */

	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
		       CN6XXX_DMA_CNT(0),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(0),
		       octeon_read_csr(oct, reg));

	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
		       CN6XXX_DMA_CNT(1),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));

	/* PCI Index registers */

	len += sprintf(s + len, "\n");

	for (i = 0; i < 16; i++) {
		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
	}

	return len;
}

static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
{
	u32 val;
	int i, len = 0;

	/* PCI CONFIG Registers */

	len += sprintf(s + len,
		       "\n\t Octeon Config space Registers\n\n");

	for (i = 0; i <= 13; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	for (i = 30; i <= 34; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	return len;
}

/* Return register dump to user app. 
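The regbuf passed in was sized by lio_get_regs_len() for the detected chip, so each branch below clears and fills only its own dump length. 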
*/ 3014 static void lio_get_regs(struct net_device *dev, 3015 struct ethtool_regs *regs, void *regbuf) 3016 { 3017 struct lio *lio = GET_LIO(dev); 3018 int len = 0; 3019 struct octeon_device *oct = lio->oct_dev; 3020 3021 regs->version = OCT_ETHTOOL_REGSVER; 3022 3023 switch (oct->chip_id) { 3024 case OCTEON_CN23XX_PF_VID: 3025 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX); 3026 len += cn23xx_read_csr_reg(regbuf + len, oct); 3027 break; 3028 case OCTEON_CN23XX_VF_VID: 3029 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF); 3030 len += cn23xx_vf_read_csr_reg(regbuf + len, oct); 3031 break; 3032 case OCTEON_CN68XX: 3033 case OCTEON_CN66XX: 3034 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); 3035 len += cn6xxx_read_csr_reg(regbuf + len, oct); 3036 len += cn6xxx_read_config_reg(regbuf + len, oct); 3037 break; 3038 default: 3039 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n", 3040 __func__, oct->chip_id); 3041 } 3042 } 3043 3044 static u32 lio_get_priv_flags(struct net_device *netdev) 3045 { 3046 struct lio *lio = GET_LIO(netdev); 3047 3048 return lio->oct_dev->priv_flags; 3049 } 3050 3051 static int lio_set_priv_flags(struct net_device *netdev, u32 flags) 3052 { 3053 struct lio *lio = GET_LIO(netdev); 3054 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); 3055 3056 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, 3057 intr_by_tx_bytes); 3058 return 0; 3059 } 3060 3061 static int lio_get_fecparam(struct net_device *netdev, 3062 struct ethtool_fecparam *fec) 3063 { 3064 struct lio *lio = GET_LIO(netdev); 3065 struct octeon_device *oct = lio->oct_dev; 3066 3067 fec->active_fec = ETHTOOL_FEC_NONE; 3068 fec->fec = ETHTOOL_FEC_NONE; 3069 3070 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 3071 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { 3072 if (oct->no_speed_setting == 1) 3073 return 0; 3074 3075 liquidio_get_fec(lio); 3076 fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF); 3077 if (oct->props[lio->ifidx].fec == 1) 3078 fec->active_fec = ETHTOOL_FEC_RS; 3079 else 3080 fec->active_fec = ETHTOOL_FEC_OFF; 3081 } 3082 3083 return 0; 3084 } 3085 3086 static int lio_set_fecparam(struct net_device *netdev, 3087 struct ethtool_fecparam *fec) 3088 { 3089 struct lio *lio = GET_LIO(netdev); 3090 struct octeon_device *oct = lio->oct_dev; 3091 3092 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 3093 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { 3094 if (oct->no_speed_setting == 1) 3095 return -EOPNOTSUPP; 3096 3097 if (fec->fec & ETHTOOL_FEC_OFF) 3098 liquidio_set_fec(lio, 0); 3099 else if (fec->fec & ETHTOOL_FEC_RS) 3100 liquidio_set_fec(lio, 1); 3101 else 3102 return -EOPNOTSUPP; 3103 } else { 3104 return -EOPNOTSUPP; 3105 } 3106 3107 return 0; 3108 } 3109 3110 #define LIO_ETHTOOL_COALESCE (ETHTOOL_COALESCE_RX_USECS | \ 3111 ETHTOOL_COALESCE_MAX_FRAMES | \ 3112 ETHTOOL_COALESCE_USE_ADAPTIVE | \ 3113 ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \ 3114 ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \ 3115 ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \ 3116 ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH | \ 3117 ETHTOOL_COALESCE_PKT_RATE_RX_USECS) 3118 3119 static const struct ethtool_ops lio_ethtool_ops = { 3120 .supported_coalesce_params = LIO_ETHTOOL_COALESCE, 3121 .get_link_ksettings = lio_get_link_ksettings, 3122 .set_link_ksettings = lio_set_link_ksettings, 3123 .get_fecparam = lio_get_fecparam, 3124 .set_fecparam = lio_set_fecparam, 3125 .get_link = ethtool_op_get_link, 3126 .get_drvinfo = lio_get_drvinfo, 3127 .get_ringparam = lio_ethtool_get_ringparam, 3128 .set_ringparam = 
lio_ethtool_set_ringparam, 3129 .get_channels = lio_ethtool_get_channels, 3130 .set_channels = lio_ethtool_set_channels, 3131 .set_phys_id = lio_set_phys_id, 3132 .get_eeprom_len = lio_get_eeprom_len, 3133 .get_eeprom = lio_get_eeprom, 3134 .get_strings = lio_get_strings, 3135 .get_ethtool_stats = lio_get_ethtool_stats, 3136 .get_pauseparam = lio_get_pauseparam, 3137 .set_pauseparam = lio_set_pauseparam, 3138 .get_regs_len = lio_get_regs_len, 3139 .get_regs = lio_get_regs, 3140 .get_msglevel = lio_get_msglevel, 3141 .set_msglevel = lio_set_msglevel, 3142 .get_sset_count = lio_get_sset_count, 3143 .get_coalesce = lio_get_intr_coalesce, 3144 .set_coalesce = lio_set_intr_coalesce, 3145 .get_priv_flags = lio_get_priv_flags, 3146 .set_priv_flags = lio_set_priv_flags, 3147 .get_ts_info = lio_get_ts_info, 3148 }; 3149 3150 static const struct ethtool_ops lio_vf_ethtool_ops = { 3151 .supported_coalesce_params = LIO_ETHTOOL_COALESCE, 3152 .get_link_ksettings = lio_get_link_ksettings, 3153 .get_link = ethtool_op_get_link, 3154 .get_drvinfo = lio_get_vf_drvinfo, 3155 .get_ringparam = lio_ethtool_get_ringparam, 3156 .set_ringparam = lio_ethtool_set_ringparam, 3157 .get_channels = lio_ethtool_get_channels, 3158 .set_channels = lio_ethtool_set_channels, 3159 .get_strings = lio_vf_get_strings, 3160 .get_ethtool_stats = lio_vf_get_ethtool_stats, 3161 .get_regs_len = lio_get_regs_len, 3162 .get_regs = lio_get_regs, 3163 .get_msglevel = lio_get_msglevel, 3164 .set_msglevel = lio_vf_set_msglevel, 3165 .get_sset_count = lio_vf_get_sset_count, 3166 .get_coalesce = lio_get_intr_coalesce, 3167 .set_coalesce = lio_set_intr_coalesce, 3168 .get_priv_flags = lio_get_priv_flags, 3169 .set_priv_flags = lio_set_priv_flags, 3170 .get_ts_info = lio_get_ts_info, 3171 }; 3172 3173 void liquidio_set_ethtool_ops(struct net_device *netdev) 3174 { 3175 struct lio *lio = GET_LIO(netdev); 3176 struct octeon_device *oct = lio->oct_dev; 3177 3178 if (OCTEON_CN23XX_VF(oct)) 3179 netdev->ethtool_ops = &lio_vf_ethtool_ops; 3180 else 3181 netdev->ethtool_ops = &lio_ethtool_ops; 3182 } 3183 EXPORT_SYMBOL_GPL(liquidio_set_ethtool_ops); 3184