1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/netdevice.h> 19 #include <linux/net_tstamp.h> 20 #include <linux/pci.h> 21 #include "liquidio_common.h" 22 #include "octeon_droq.h" 23 #include "octeon_iq.h" 24 #include "response_manager.h" 25 #include "octeon_device.h" 26 #include "octeon_nic.h" 27 #include "octeon_main.h" 28 #include "octeon_network.h" 29 #include "cn66xx_regs.h" 30 #include "cn66xx_device.h" 31 #include "cn23xx_pf_device.h" 32 #include "cn23xx_vf_device.h" 33 34 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); 35 36 struct oct_intrmod_context { 37 int octeon_id; 38 wait_queue_head_t wc; 39 int cond; 40 int status; 41 }; 42 43 struct oct_intrmod_resp { 44 u64 rh; 45 struct oct_intrmod_cfg intrmod; 46 u64 status; 47 }; 48 49 struct oct_mdio_cmd_context { 50 int octeon_id; 51 wait_queue_head_t wc; 52 int cond; 53 }; 54 55 struct oct_mdio_cmd_resp { 56 u64 rh; 57 struct oct_mdio_cmd resp; 58 u64 status; 59 }; 60 61 #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp)) 62 63 /* Octeon's interface mode of operation */ 64 enum { 65 INTERFACE_MODE_DISABLED, 66 INTERFACE_MODE_RGMII, 67 INTERFACE_MODE_GMII, 68 INTERFACE_MODE_SPI, 69 INTERFACE_MODE_PCIE, 70 INTERFACE_MODE_XAUI, 71 INTERFACE_MODE_SGMII, 72 INTERFACE_MODE_PICMG, 73 INTERFACE_MODE_NPI, 74 INTERFACE_MODE_LOOP, 75 INTERFACE_MODE_SRIO, 76 INTERFACE_MODE_ILK, 77 INTERFACE_MODE_RXAUI, 78 INTERFACE_MODE_QSGMII, 79 INTERFACE_MODE_AGL, 80 INTERFACE_MODE_XLAUI, 81 INTERFACE_MODE_XFI, 82 INTERFACE_MODE_10G_KR, 83 INTERFACE_MODE_40G_KR4, 84 INTERFACE_MODE_MIXED, 85 }; 86 87 #define OCT_ETHTOOL_REGDUMP_LEN 4096 88 #define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11) 89 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2) 90 #define OCT_ETHTOOL_REGSVER 1 91 92 /* statistics of PF */ 93 static const char oct_stats_strings[][ETH_GSTRING_LEN] = { 94 "rx_packets", 95 "tx_packets", 96 "rx_bytes", 97 "tx_bytes", 98 "rx_errors", 99 "tx_errors", 100 "rx_dropped", 101 "tx_dropped", 102 103 "tx_total_sent", 104 "tx_total_fwd", 105 "tx_err_pko", 106 "tx_err_pki", 107 "tx_err_link", 108 "tx_err_drop", 109 110 "tx_tso", 111 "tx_tso_packets", 112 "tx_tso_err", 113 "tx_vxlan", 114 115 "tx_mcast", 116 "tx_bcast", 117 118 "mac_tx_total_pkts", 119 "mac_tx_total_bytes", 120 "mac_tx_mcast_pkts", 121 "mac_tx_bcast_pkts", 122 "mac_tx_ctl_packets", 123 "mac_tx_total_collisions", 124 "mac_tx_one_collision", 125 "mac_tx_multi_collision", 126 "mac_tx_max_collision_fail", 127 "mac_tx_max_deferal_fail", 128 "mac_tx_fifo_err", 129 "mac_tx_runts", 130 131 "rx_total_rcvd", 132 "rx_total_fwd", 133 "rx_mcast", 134 "rx_bcast", 135 "rx_jabber_err", 136 "rx_l2_err", 137 "rx_frame_err", 138 "rx_err_pko", 139 "rx_err_link", 140 "rx_err_drop", 141 142 "rx_vxlan", 143 "rx_vxlan_err", 144 145 "rx_lro_pkts", 146 "rx_lro_bytes", 147 "rx_total_lro", 148 149 "rx_lro_aborts", 150 "rx_lro_aborts_port", 151 "rx_lro_aborts_seq", 152 "rx_lro_aborts_tsval", 153 "rx_lro_aborts_timer", 154 "rx_fwd_rate", 155 156 "mac_rx_total_rcvd", 157 "mac_rx_bytes", 158 "mac_rx_total_bcst", 159 "mac_rx_total_mcst", 160 "mac_rx_runts", 161 "mac_rx_ctl_packets", 162 "mac_rx_fifo_err", 163 "mac_rx_dma_drop", 164 "mac_rx_fcs_err", 165 166 "link_state_changes", 167 }; 168 169 /* statistics of VF */ 170 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { 171 "rx_packets", 172 "tx_packets", 173 "rx_bytes", 174 "tx_bytes", 175 "rx_errors", 176 "tx_errors", 177 "rx_dropped", 178 "tx_dropped", 179 "rx_mcast", 180 "tx_mcast", 181 "rx_bcast", 182 "tx_bcast", 183 "link_state_changes", 184 }; 185 186 /* statistics of host tx queue */ 187 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { 188 "packets", 189 "bytes", 190 "dropped", 191 "iq_busy", 192 "sgentry_sent", 193 194 "fw_instr_posted", 195 "fw_instr_processed", 196 "fw_instr_dropped", 197 "fw_bytes_sent", 198 199 "tso", 200 "vxlan", 201 "txq_restart", 202 }; 203 204 /* statistics of host rx queue */ 205 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { 206 "packets", 207 "bytes", 208 "dropped", 209 "dropped_nomem", 210 "dropped_toomany", 211 "fw_dropped", 212 "fw_pkts_received", 213 "fw_bytes_received", 214 "fw_dropped_nodispatch", 215 216 "vxlan", 217 "buffer_alloc_failure", 218 }; 219 220 /* LiquidIO driver private flags */ 221 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { 222 }; 223 224 #define OCTNIC_NCMD_AUTONEG_ON 0x1 225 #define OCTNIC_NCMD_PHY_ON 0x2 226 227 static int lio_get_link_ksettings(struct net_device *netdev, 228 struct ethtool_link_ksettings *ecmd) 229 { 230 struct lio *lio = GET_LIO(netdev); 231 struct octeon_device *oct = lio->oct_dev; 232 struct oct_link_info *linfo; 233 234 linfo = &lio->linfo; 235 236 ethtool_link_ksettings_zero_link_mode(ecmd, supported); 237 ethtool_link_ksettings_zero_link_mode(ecmd, advertising); 238 239 switch (linfo->link.s.phy_type) { 240 case LIO_PHY_PORT_TP: 241 ecmd->base.port = PORT_TP; 242 ecmd->base.autoneg = AUTONEG_DISABLE; 243 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); 244 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); 245 ethtool_link_ksettings_add_link_mode(ecmd, supported, 246 10000baseT_Full); 247 248 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); 249 ethtool_link_ksettings_add_link_mode(ecmd, advertising, 250 10000baseT_Full); 251 252 break; 253 254 case LIO_PHY_PORT_FIBRE: 255 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || 256 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || 257 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || 258 linfo->link.s.if_mode == INTERFACE_MODE_XFI) { 259 dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); 260 } else { 261 dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", 262 linfo->link.s.if_mode); 263 } 264 265 ecmd->base.port = PORT_FIBRE; 266 ecmd->base.autoneg = AUTONEG_DISABLE; 267 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); 268 269 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); 270 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); 271 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 272 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { 273 if (OCTEON_CN23XX_PF(oct)) { 274 ethtool_link_ksettings_add_link_mode 275 (ecmd, supported, 25000baseSR_Full); 276 ethtool_link_ksettings_add_link_mode 277 (ecmd, supported, 25000baseKR_Full); 278 ethtool_link_ksettings_add_link_mode 279 (ecmd, supported, 25000baseCR_Full); 280 281 if (oct->no_speed_setting == 0) { 282 ethtool_link_ksettings_add_link_mode 283 (ecmd, supported, 284 10000baseSR_Full); 285 ethtool_link_ksettings_add_link_mode 286 (ecmd, supported, 287 10000baseKR_Full); 288 ethtool_link_ksettings_add_link_mode 289 (ecmd, supported, 290 10000baseCR_Full); 291 } 292 293 if (oct->no_speed_setting == 0) 294 liquidio_get_speed(lio); 295 else 296 oct->speed_setting = 25; 297 298 if (oct->speed_setting == 10) { 299 ethtool_link_ksettings_add_link_mode 300 (ecmd, advertising, 301 10000baseSR_Full); 302 ethtool_link_ksettings_add_link_mode 303 (ecmd, advertising, 304 10000baseKR_Full); 305 ethtool_link_ksettings_add_link_mode 306 (ecmd, advertising, 307 10000baseCR_Full); 308 } 309 if (oct->speed_setting == 25) { 310 ethtool_link_ksettings_add_link_mode 311 (ecmd, advertising, 312 25000baseSR_Full); 313 ethtool_link_ksettings_add_link_mode 314 (ecmd, advertising, 315 25000baseKR_Full); 316 ethtool_link_ksettings_add_link_mode 317 (ecmd, advertising, 318 25000baseCR_Full); 319 } 320 } else { /* VF */ 321 if (linfo->link.s.speed == 10000) { 322 ethtool_link_ksettings_add_link_mode 323 (ecmd, supported, 324 10000baseSR_Full); 325 ethtool_link_ksettings_add_link_mode 326 (ecmd, supported, 327 10000baseKR_Full); 328 ethtool_link_ksettings_add_link_mode 329 (ecmd, supported, 330 10000baseCR_Full); 331 332 ethtool_link_ksettings_add_link_mode 333 (ecmd, advertising, 334 10000baseSR_Full); 335 ethtool_link_ksettings_add_link_mode 336 (ecmd, advertising, 337 10000baseKR_Full); 338 ethtool_link_ksettings_add_link_mode 339 (ecmd, advertising, 340 10000baseCR_Full); 341 } 342 343 if (linfo->link.s.speed == 25000) { 344 ethtool_link_ksettings_add_link_mode 345 (ecmd, supported, 346 25000baseSR_Full); 347 ethtool_link_ksettings_add_link_mode 348 (ecmd, supported, 349 25000baseKR_Full); 350 ethtool_link_ksettings_add_link_mode 351 (ecmd, supported, 352 25000baseCR_Full); 353 354 ethtool_link_ksettings_add_link_mode 355 (ecmd, advertising, 356 25000baseSR_Full); 357 ethtool_link_ksettings_add_link_mode 358 (ecmd, advertising, 359 25000baseKR_Full); 360 ethtool_link_ksettings_add_link_mode 361 (ecmd, advertising, 362 25000baseCR_Full); 363 } 364 } 365 } else { 366 ethtool_link_ksettings_add_link_mode(ecmd, supported, 367 10000baseT_Full); 368 ethtool_link_ksettings_add_link_mode(ecmd, advertising, 369 10000baseT_Full); 370 } 371 break; 372 } 373 374 if (linfo->link.s.link_up) { 375 ecmd->base.speed = linfo->link.s.speed; 376 ecmd->base.duplex = linfo->link.s.duplex; 377 } else { 378 ecmd->base.speed = SPEED_UNKNOWN; 379 ecmd->base.duplex = DUPLEX_UNKNOWN; 380 } 381 382 return 0; 383 } 384 385 static int lio_set_link_ksettings(struct net_device *netdev, 386 const struct ethtool_link_ksettings *ecmd) 387 { 388 const int speed = ecmd->base.speed; 389 struct lio *lio = GET_LIO(netdev); 390 struct oct_link_info *linfo; 391 struct octeon_device *oct; 392 u32 is25G = 0; 393 394 oct = lio->oct_dev; 395 396 linfo = &lio->linfo; 397 398 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || 399 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { 400 is25G = 1; 401 } else { 402 return -EOPNOTSUPP; 403 } 404 405 if (oct->no_speed_setting) { 406 dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n", 407 __func__); 408 return -EOPNOTSUPP; 409 } 410 411 if ((ecmd->base.duplex != DUPLEX_UNKNOWN && 412 ecmd->base.duplex != linfo->link.s.duplex) || 413 ecmd->base.autoneg != AUTONEG_DISABLE || 414 (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 && 415 ecmd->base.speed != SPEED_UNKNOWN)) 416 return -EOPNOTSUPP; 417 418 if ((oct->speed_boot == speed / 1000) && 419 oct->speed_boot == oct->speed_setting) 420 return 0; 421 422 liquidio_set_speed(lio, speed / 1000); 423 424 dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n", 425 oct->speed_setting); 426 427 return 0; 428 } 429 430 static void 431 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 432 { 433 struct lio *lio; 434 struct octeon_device *oct; 435 436 lio = GET_LIO(netdev); 437 oct = lio->oct_dev; 438 439 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 440 strcpy(drvinfo->driver, "liquidio"); 441 strcpy(drvinfo->version, LIQUIDIO_VERSION); 442 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 443 ETHTOOL_FWVERS_LEN); 444 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 445 } 446 447 static void 448 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 449 { 450 struct octeon_device *oct; 451 struct lio *lio; 452 453 lio = GET_LIO(netdev); 454 oct = lio->oct_dev; 455 456 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 457 strcpy(drvinfo->driver, "liquidio_vf"); 458 strcpy(drvinfo->version, LIQUIDIO_VERSION); 459 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 460 ETHTOOL_FWVERS_LEN); 461 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 462 } 463 464 static int 465 lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) 466 { 467 struct lio *lio = GET_LIO(netdev); 468 struct octeon_device *oct = lio->oct_dev; 469 struct octnic_ctrl_pkt nctrl; 470 int ret = 0; 471 472 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 473 474 nctrl.ncmd.u64 = 0; 475 nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL; 476 nctrl.ncmd.s.param1 = num_queues; 477 nctrl.ncmd.s.param2 = num_queues; 478 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 479 nctrl.wait_time = 100; 480 nctrl.netpndev = (u64)netdev; 481 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 482 483 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 484 if (ret < 0) { 485 dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", 486 ret); 487 return -1; 488 } 489 490 return 0; 491 } 492 493 static void 494 lio_ethtool_get_channels(struct net_device *dev, 495 struct ethtool_channels *channel) 496 { 497 struct lio *lio = GET_LIO(dev); 498 struct octeon_device *oct = lio->oct_dev; 499 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; 500 u32 combined_count = 0, max_combined = 0; 501 502 if (OCTEON_CN6XXX(oct)) { 503 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 504 505 max_rx = CFG_GET_OQ_MAX_Q(conf6x); 506 max_tx = CFG_GET_IQ_MAX_Q(conf6x); 507 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); 508 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); 509 } else if (OCTEON_CN23XX_PF(oct)) { 510 if (oct->sriov_info.sriov_enabled) { 511 max_combined = lio->linfo.num_txpciq; 512 } else { 513 struct octeon_config *conf23_pf = 514 CHIP_CONF(oct, cn23xx_pf); 515 516 max_combined = CFG_GET_IQ_MAX_Q(conf23_pf); 517 } 518 combined_count = oct->num_iqs; 519 } else if (OCTEON_CN23XX_VF(oct)) { 520 u64 reg_val = 0ULL; 521 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); 522 523 reg_val = octeon_read_csr64(oct, ctrl); 524 reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; 525 max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; 526 combined_count = oct->num_iqs; 527 } 528 529 channel->max_rx = max_rx; 530 channel->max_tx = max_tx; 531 channel->max_combined = max_combined; 532 channel->rx_count = rx_count; 533 channel->tx_count = tx_count; 534 channel->combined_count = combined_count; 535 } 536 537 static int 538 lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) 539 { 540 struct msix_entry *msix_entries; 541 int num_msix_irqs = 0; 542 int i; 543 544 if (!oct->msix_on) 545 return 0; 546 547 /* Disable the input and output queues now. No more packets will 548 * arrive from Octeon. 549 */ 550 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 551 552 if (oct->msix_on) { 553 if (OCTEON_CN23XX_PF(oct)) 554 num_msix_irqs = oct->num_msix_irqs - 1; 555 else if (OCTEON_CN23XX_VF(oct)) 556 num_msix_irqs = oct->num_msix_irqs; 557 558 msix_entries = (struct msix_entry *)oct->msix_entries; 559 for (i = 0; i < num_msix_irqs; i++) { 560 if (oct->ioq_vector[i].vector) { 561 /* clear the affinity_cpumask */ 562 irq_set_affinity_hint(msix_entries[i].vector, 563 NULL); 564 free_irq(msix_entries[i].vector, 565 &oct->ioq_vector[i]); 566 oct->ioq_vector[i].vector = 0; 567 } 568 } 569 570 /* non-iov vector's argument is oct struct */ 571 if (OCTEON_CN23XX_PF(oct)) 572 free_irq(msix_entries[i].vector, oct); 573 574 pci_disable_msix(oct->pci_dev); 575 kfree(oct->msix_entries); 576 oct->msix_entries = NULL; 577 } 578 579 kfree(oct->irq_name_storage); 580 oct->irq_name_storage = NULL; 581 582 if (octeon_allocate_ioq_vector(oct, num_ioqs)) { 583 dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); 584 return -1; 585 } 586 587 if (octeon_setup_interrupt(oct, num_ioqs)) { 588 dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); 589 return -1; 590 } 591 592 /* Enable Octeon device interrupts */ 593 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 594 595 return 0; 596 } 597 598 static int 599 lio_ethtool_set_channels(struct net_device *dev, 600 struct ethtool_channels *channel) 601 { 602 u32 combined_count, max_combined; 603 struct lio *lio = GET_LIO(dev); 604 struct octeon_device *oct = lio->oct_dev; 605 int stopped = 0; 606 607 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) { 608 dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n"); 609 return -EINVAL; 610 } 611 612 if (!channel->combined_count || channel->other_count || 613 channel->rx_count || channel->tx_count) 614 return -EINVAL; 615 616 combined_count = channel->combined_count; 617 618 if (OCTEON_CN23XX_PF(oct)) { 619 if (oct->sriov_info.sriov_enabled) { 620 max_combined = lio->linfo.num_txpciq; 621 } else { 622 struct octeon_config *conf23_pf = 623 CHIP_CONF(oct, 624 cn23xx_pf); 625 626 max_combined = 627 CFG_GET_IQ_MAX_Q(conf23_pf); 628 } 629 } else if (OCTEON_CN23XX_VF(oct)) { 630 u64 reg_val = 0ULL; 631 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); 632 633 reg_val = octeon_read_csr64(oct, ctrl); 634 reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; 635 max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; 636 } else { 637 return -EINVAL; 638 } 639 640 if (combined_count > max_combined || combined_count < 1) 641 return -EINVAL; 642 643 if (combined_count == oct->num_iqs) 644 return 0; 645 646 ifstate_set(lio, LIO_IFSTATE_RESETTING); 647 648 if (netif_running(dev)) { 649 dev->netdev_ops->ndo_stop(dev); 650 stopped = 1; 651 } 652 653 if (lio_reset_queues(dev, combined_count)) 654 return -EINVAL; 655 656 if (stopped) 657 dev->netdev_ops->ndo_open(dev); 658 659 ifstate_reset(lio, LIO_IFSTATE_RESETTING); 660 661 return 0; 662 } 663 664 static int lio_get_eeprom_len(struct net_device *netdev) 665 { 666 u8 buf[192]; 667 struct lio *lio = GET_LIO(netdev); 668 struct octeon_device *oct_dev = lio->oct_dev; 669 struct octeon_board_info *board_info; 670 int len; 671 672 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 673 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n", 674 board_info->name, board_info->serial_number, 675 board_info->major, board_info->minor); 676 677 return len; 678 } 679 680 static int 681 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 682 u8 *bytes) 683 { 684 struct lio *lio = GET_LIO(netdev); 685 struct octeon_device *oct_dev = lio->oct_dev; 686 struct octeon_board_info *board_info; 687 688 if (eeprom->offset) 689 return -EINVAL; 690 691 eeprom->magic = oct_dev->pci_dev->vendor; 692 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 693 sprintf((char *)bytes, 694 "boardname:%s serialnum:%s maj:%lld min:%lld\n", 695 board_info->name, board_info->serial_number, 696 board_info->major, board_info->minor); 697 698 return 0; 699 } 700 701 static int octnet_gpio_access(struct net_device *netdev, int addr, int val) 702 { 703 struct lio *lio = GET_LIO(netdev); 704 struct octeon_device *oct = lio->oct_dev; 705 struct octnic_ctrl_pkt nctrl; 706 int ret = 0; 707 708 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 709 710 nctrl.ncmd.u64 = 0; 711 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; 712 nctrl.ncmd.s.param1 = addr; 713 nctrl.ncmd.s.param2 = val; 714 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 715 nctrl.wait_time = 100; 716 nctrl.netpndev = (u64)netdev; 717 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 718 719 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 720 if (ret < 0) { 721 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 722 return -EINVAL; 723 } 724 725 return 0; 726 } 727 728 static int octnet_id_active(struct net_device *netdev, int val) 729 { 730 struct lio *lio = GET_LIO(netdev); 731 struct octeon_device *oct = lio->oct_dev; 732 struct octnic_ctrl_pkt nctrl; 733 int ret = 0; 734 735 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 736 737 nctrl.ncmd.u64 = 0; 738 nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; 739 nctrl.ncmd.s.param1 = val; 740 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 741 nctrl.wait_time = 100; 742 nctrl.netpndev = (u64)netdev; 743 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 744 745 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 746 if (ret < 0) { 747 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 748 return -EINVAL; 749 } 750 751 return 0; 752 } 753 754 /* Callback for when mdio command response arrives 755 */ 756 static void octnet_mdio_resp_callback(struct octeon_device *oct, 757 u32 status, 758 void *buf) 759 { 760 struct oct_mdio_cmd_context *mdio_cmd_ctx; 761 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 762 763 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; 764 765 oct = lio_get_device(mdio_cmd_ctx->octeon_id); 766 if (status) { 767 dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n", 768 CVM_CAST64(status)); 769 WRITE_ONCE(mdio_cmd_ctx->cond, -1); 770 } else { 771 WRITE_ONCE(mdio_cmd_ctx->cond, 1); 772 } 773 wake_up_interruptible(&mdio_cmd_ctx->wc); 774 } 775 776 /* This routine provides PHY access routines for 777 * mdio clause45 . 778 */ 779 static int 780 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) 781 { 782 struct octeon_device *oct_dev = lio->oct_dev; 783 struct octeon_soft_command *sc; 784 struct oct_mdio_cmd_resp *mdio_cmd_rsp; 785 struct oct_mdio_cmd_context *mdio_cmd_ctx; 786 struct oct_mdio_cmd *mdio_cmd; 787 int retval = 0; 788 789 sc = (struct octeon_soft_command *) 790 octeon_alloc_soft_command(oct_dev, 791 sizeof(struct oct_mdio_cmd), 792 sizeof(struct oct_mdio_cmd_resp), 793 sizeof(struct oct_mdio_cmd_context)); 794 795 if (!sc) 796 return -ENOMEM; 797 798 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; 799 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; 800 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; 801 802 WRITE_ONCE(mdio_cmd_ctx->cond, 0); 803 mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); 804 mdio_cmd->op = op; 805 mdio_cmd->mdio_addr = loc; 806 if (op) 807 mdio_cmd->value1 = *value; 808 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); 809 810 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 811 812 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 813 0, 0, 0); 814 815 sc->wait_time = 1000; 816 sc->callback = octnet_mdio_resp_callback; 817 sc->callback_arg = sc; 818 819 init_waitqueue_head(&mdio_cmd_ctx->wc); 820 821 retval = octeon_send_soft_command(oct_dev, sc); 822 823 if (retval == IQ_SEND_FAILED) { 824 dev_err(&oct_dev->pci_dev->dev, 825 "octnet_mdio45_access instruction failed status: %x\n", 826 retval); 827 retval = -EBUSY; 828 } else { 829 /* Sleep on a wait queue till the cond flag indicates that the 830 * response arrived 831 */ 832 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond); 833 retval = mdio_cmd_rsp->status; 834 if (retval) { 835 dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n"); 836 retval = -EBUSY; 837 } else { 838 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), 839 sizeof(struct oct_mdio_cmd) / 8); 840 841 if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { 842 if (!op) 843 *value = mdio_cmd_rsp->resp.value1; 844 } else { 845 retval = -EINVAL; 846 } 847 } 848 } 849 850 octeon_free_soft_command(oct_dev, sc); 851 852 return retval; 853 } 854 855 static int lio_set_phys_id(struct net_device *netdev, 856 enum ethtool_phys_id_state state) 857 { 858 struct lio *lio = GET_LIO(netdev); 859 struct octeon_device *oct = lio->oct_dev; 860 int value, ret; 861 862 switch (state) { 863 case ETHTOOL_ID_ACTIVE: 864 if (oct->chip_id == OCTEON_CN66XX) { 865 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 866 VITESSE_PHY_GPIO_DRIVEON); 867 return 2; 868 869 } else if (oct->chip_id == OCTEON_CN68XX) { 870 /* Save the current LED settings */ 871 ret = octnet_mdio45_access(lio, 0, 872 LIO68XX_LED_BEACON_ADDR, 873 &lio->phy_beacon_val); 874 if (ret) 875 return ret; 876 877 ret = octnet_mdio45_access(lio, 0, 878 LIO68XX_LED_CTRL_ADDR, 879 &lio->led_ctrl_val); 880 if (ret) 881 return ret; 882 883 /* Configure Beacon values */ 884 value = LIO68XX_LED_BEACON_CFGON; 885 ret = octnet_mdio45_access(lio, 1, 886 LIO68XX_LED_BEACON_ADDR, 887 &value); 888 if (ret) 889 return ret; 890 891 value = LIO68XX_LED_CTRL_CFGON; 892 ret = octnet_mdio45_access(lio, 1, 893 LIO68XX_LED_CTRL_ADDR, 894 &value); 895 if (ret) 896 return ret; 897 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 898 octnet_id_active(netdev, LED_IDENTIFICATION_ON); 899 900 /* returns 0 since updates are asynchronous */ 901 return 0; 902 } else { 903 return -EINVAL; 904 } 905 break; 906 907 case ETHTOOL_ID_ON: 908 if (oct->chip_id == OCTEON_CN66XX) 909 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 910 VITESSE_PHY_GPIO_HIGH); 911 else 912 return -EINVAL; 913 914 break; 915 916 case ETHTOOL_ID_OFF: 917 if (oct->chip_id == OCTEON_CN66XX) 918 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 919 VITESSE_PHY_GPIO_LOW); 920 else 921 return -EINVAL; 922 923 break; 924 925 case ETHTOOL_ID_INACTIVE: 926 if (oct->chip_id == OCTEON_CN66XX) { 927 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 928 VITESSE_PHY_GPIO_DRIVEOFF); 929 } else if (oct->chip_id == OCTEON_CN68XX) { 930 /* Restore LED settings */ 931 ret = octnet_mdio45_access(lio, 1, 932 LIO68XX_LED_CTRL_ADDR, 933 &lio->led_ctrl_val); 934 if (ret) 935 return ret; 936 937 ret = octnet_mdio45_access(lio, 1, 938 LIO68XX_LED_BEACON_ADDR, 939 &lio->phy_beacon_val); 940 if (ret) 941 return ret; 942 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 943 octnet_id_active(netdev, LED_IDENTIFICATION_OFF); 944 945 return 0; 946 } else { 947 return -EINVAL; 948 } 949 break; 950 951 default: 952 return -EINVAL; 953 } 954 955 return 0; 956 } 957 958 static void 959 lio_ethtool_get_ringparam(struct net_device *netdev, 960 struct ethtool_ringparam *ering) 961 { 962 struct lio *lio = GET_LIO(netdev); 963 struct octeon_device *oct = lio->oct_dev; 964 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, 965 rx_pending = 0; 966 967 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 968 return; 969 970 if (OCTEON_CN6XXX(oct)) { 971 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 972 973 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS; 974 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; 975 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); 976 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); 977 } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { 978 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; 979 rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS; 980 rx_pending = oct->droq[0]->max_count; 981 tx_pending = oct->instr_queue[0]->max_count; 982 } 983 984 ering->tx_pending = tx_pending; 985 ering->tx_max_pending = tx_max_pending; 986 ering->rx_pending = rx_pending; 987 ering->rx_max_pending = rx_max_pending; 988 ering->rx_mini_pending = 0; 989 ering->rx_jumbo_pending = 0; 990 ering->rx_mini_max_pending = 0; 991 ering->rx_jumbo_max_pending = 0; 992 } 993 994 static int lio_23xx_reconfigure_queue_count(struct lio *lio) 995 { 996 struct octeon_device *oct = lio->oct_dev; 997 struct liquidio_if_cfg_context *ctx; 998 u32 resp_size, ctx_size, data_size; 999 struct liquidio_if_cfg_resp *resp; 1000 struct octeon_soft_command *sc; 1001 union oct_nic_if_cfg if_cfg; 1002 struct lio_version *vdata; 1003 u32 ifidx_or_pfnum; 1004 int retval; 1005 int j; 1006 1007 resp_size = sizeof(struct liquidio_if_cfg_resp); 1008 ctx_size = sizeof(struct liquidio_if_cfg_context); 1009 data_size = sizeof(struct lio_version); 1010 sc = (struct octeon_soft_command *) 1011 octeon_alloc_soft_command(oct, data_size, 1012 resp_size, ctx_size); 1013 if (!sc) { 1014 dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", 1015 __func__); 1016 return -1; 1017 } 1018 1019 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1020 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 1021 vdata = (struct lio_version *)sc->virtdptr; 1022 1023 vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1024 vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1025 vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1026 1027 ifidx_or_pfnum = oct->pf_num; 1028 WRITE_ONCE(ctx->cond, 0); 1029 ctx->octeon_id = lio_get_device_id(oct); 1030 init_waitqueue_head(&ctx->wc); 1031 1032 if_cfg.u64 = 0; 1033 if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; 1034 if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings; 1035 if_cfg.s.base_queue = oct->sriov_info.pf_srn; 1036 if_cfg.s.gmx_port_id = oct->pf_num; 1037 1038 sc->iq_no = 0; 1039 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1040 OPCODE_NIC_QCOUNT_UPDATE, 0, 1041 if_cfg.u64, 0); 1042 sc->callback = lio_if_cfg_callback; 1043 sc->callback_arg = sc; 1044 sc->wait_time = LIO_IFCFG_WAIT_TIME; 1045 1046 retval = octeon_send_soft_command(oct, sc); 1047 if (retval == IQ_SEND_FAILED) { 1048 dev_err(&oct->pci_dev->dev, 1049 "iq/oq config failed status: %x\n", 1050 retval); 1051 goto qcount_update_fail; 1052 } 1053 1054 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 1055 dev_err(&oct->pci_dev->dev, "Wait interrupted\n"); 1056 return -1; 1057 } 1058 1059 retval = resp->status; 1060 if (retval) { 1061 dev_err(&oct->pci_dev->dev, "iq/oq config failed\n"); 1062 goto qcount_update_fail; 1063 } 1064 1065 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 1066 (sizeof(struct liquidio_if_cfg_info)) >> 3); 1067 1068 lio->ifidx = ifidx_or_pfnum; 1069 lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask); 1070 lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask); 1071 for (j = 0; j < lio->linfo.num_rxpciq; j++) { 1072 lio->linfo.rxpciq[j].u64 = 1073 resp->cfg_info.linfo.rxpciq[j].u64; 1074 } 1075 1076 for (j = 0; j < lio->linfo.num_txpciq; j++) { 1077 lio->linfo.txpciq[j].u64 = 1078 resp->cfg_info.linfo.txpciq[j].u64; 1079 } 1080 1081 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 1082 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 1083 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 1084 lio->txq = lio->linfo.txpciq[0].s.q_no; 1085 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 1086 1087 octeon_free_soft_command(oct, sc); 1088 dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", 1089 lio->linfo.num_rxpciq); 1090 1091 return 0; 1092 1093 qcount_update_fail: 1094 octeon_free_soft_command(oct, sc); 1095 1096 return -1; 1097 } 1098 1099 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) 1100 { 1101 struct lio *lio = GET_LIO(netdev); 1102 struct octeon_device *oct = lio->oct_dev; 1103 int i, queue_count_update = 0; 1104 struct napi_struct *napi, *n; 1105 int ret; 1106 1107 schedule_timeout_uninterruptible(msecs_to_jiffies(100)); 1108 1109 if (wait_for_pending_requests(oct)) 1110 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 1111 1112 if (lio_wait_for_instr_fetch(oct)) 1113 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 1114 1115 if (octeon_set_io_queues_off(oct)) { 1116 dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n"); 1117 return -1; 1118 } 1119 1120 /* Disable the input and output queues now. No more packets will 1121 * arrive from Octeon. 1122 */ 1123 oct->fn_list.disable_io_queues(oct); 1124 /* Delete NAPI */ 1125 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1126 netif_napi_del(napi); 1127 1128 if (num_qs != oct->num_iqs) { 1129 ret = netif_set_real_num_rx_queues(netdev, num_qs); 1130 if (ret) { 1131 dev_err(&oct->pci_dev->dev, 1132 "Setting real number rx failed\n"); 1133 return ret; 1134 } 1135 1136 ret = netif_set_real_num_tx_queues(netdev, num_qs); 1137 if (ret) { 1138 dev_err(&oct->pci_dev->dev, 1139 "Setting real number tx failed\n"); 1140 return ret; 1141 } 1142 1143 /* The value of queue_count_update decides whether it is the 1144 * queue count or the descriptor count that is being 1145 * re-configured. 1146 */ 1147 queue_count_update = 1; 1148 } 1149 1150 /* Re-configuration of queues can happen in two scenarios, SRIOV enabled 1151 * and SRIOV disabled. Few things like recreating queue zero, resetting 1152 * glists and IRQs are required for both. For the latter, some more 1153 * steps like updating sriov_info for the octeon device need to be done. 1154 */ 1155 if (queue_count_update) { 1156 lio_delete_glists(lio); 1157 1158 /* Delete mbox for PF which is SRIOV disabled because sriov_info 1159 * will be now changed. 1160 */ 1161 if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled) 1162 oct->fn_list.free_mbox(oct); 1163 } 1164 1165 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1166 if (!(oct->io_qmask.oq & BIT_ULL(i))) 1167 continue; 1168 octeon_delete_droq(oct, i); 1169 } 1170 1171 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 1172 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1173 continue; 1174 octeon_delete_instr_queue(oct, i); 1175 } 1176 1177 if (queue_count_update) { 1178 /* For PF re-configure sriov related information */ 1179 if ((OCTEON_CN23XX_PF(oct)) && 1180 !oct->sriov_info.sriov_enabled) { 1181 oct->sriov_info.num_pf_rings = num_qs; 1182 if (cn23xx_sriov_config(oct)) { 1183 dev_err(&oct->pci_dev->dev, 1184 "Queue reset aborted: SRIOV config failed\n"); 1185 return -1; 1186 } 1187 1188 num_qs = oct->sriov_info.num_pf_rings; 1189 } 1190 } 1191 1192 if (oct->fn_list.setup_device_regs(oct)) { 1193 dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); 1194 return -1; 1195 } 1196 1197 /* The following are needed in case of queue count re-configuration and 1198 * not for descriptor count re-configuration. 1199 */ 1200 if (queue_count_update) { 1201 if (octeon_setup_instr_queues(oct)) 1202 return -1; 1203 1204 if (octeon_setup_output_queues(oct)) 1205 return -1; 1206 1207 /* Recreating mbox for PF that is SRIOV disabled */ 1208 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { 1209 if (oct->fn_list.setup_mbox(oct)) { 1210 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 1211 return -1; 1212 } 1213 } 1214 1215 /* Deleting and recreating IRQs whether the interface is SRIOV 1216 * enabled or disabled. 1217 */ 1218 if (lio_irq_reallocate_irqs(oct, num_qs)) { 1219 dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n"); 1220 return -1; 1221 } 1222 1223 /* Enable the input and output queues for this Octeon device */ 1224 if (oct->fn_list.enable_io_queues(oct)) { 1225 dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n"); 1226 return -1; 1227 } 1228 1229 for (i = 0; i < oct->num_oqs; i++) 1230 writel(oct->droq[i]->max_count, 1231 oct->droq[i]->pkts_credit_reg); 1232 1233 /* Informing firmware about the new queue count. It is required 1234 * for firmware to allocate more number of queues than those at 1235 * load time. 1236 */ 1237 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { 1238 if (lio_23xx_reconfigure_queue_count(lio)) 1239 return -1; 1240 } 1241 } 1242 1243 /* Once firmware is aware of the new value, queues can be recreated */ 1244 if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { 1245 dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n"); 1246 return -1; 1247 } 1248 1249 if (queue_count_update) { 1250 if (lio_setup_glists(oct, lio, num_qs)) { 1251 dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n"); 1252 return -1; 1253 } 1254 1255 /* Send firmware the information about new number of queues 1256 * if the interface is a VF or a PF that is SRIOV enabled. 1257 */ 1258 if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct)) 1259 if (lio_send_queue_count_update(netdev, num_qs)) 1260 return -1; 1261 } 1262 1263 return 0; 1264 } 1265 1266 static int lio_ethtool_set_ringparam(struct net_device *netdev, 1267 struct ethtool_ringparam *ering) 1268 { 1269 u32 rx_count, tx_count, rx_count_old, tx_count_old; 1270 struct lio *lio = GET_LIO(netdev); 1271 struct octeon_device *oct = lio->oct_dev; 1272 int stopped = 0; 1273 1274 if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct)) 1275 return -EINVAL; 1276 1277 if (ering->rx_mini_pending || ering->rx_jumbo_pending) 1278 return -EINVAL; 1279 1280 rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS, 1281 CN23XX_MAX_OQ_DESCRIPTORS); 1282 tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS, 1283 CN23XX_MAX_IQ_DESCRIPTORS); 1284 1285 rx_count_old = oct->droq[0]->max_count; 1286 tx_count_old = oct->instr_queue[0]->max_count; 1287 1288 if (rx_count == rx_count_old && tx_count == tx_count_old) 1289 return 0; 1290 1291 ifstate_set(lio, LIO_IFSTATE_RESETTING); 1292 1293 if (netif_running(netdev)) { 1294 netdev->netdev_ops->ndo_stop(netdev); 1295 stopped = 1; 1296 } 1297 1298 /* Change RX/TX DESCS count */ 1299 if (tx_count != tx_count_old) 1300 CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1301 tx_count); 1302 if (rx_count != rx_count_old) 1303 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1304 rx_count); 1305 1306 if (lio_reset_queues(netdev, oct->num_iqs)) 1307 goto err_lio_reset_queues; 1308 1309 if (stopped) 1310 netdev->netdev_ops->ndo_open(netdev); 1311 1312 ifstate_reset(lio, LIO_IFSTATE_RESETTING); 1313 1314 return 0; 1315 1316 err_lio_reset_queues: 1317 if (tx_count != tx_count_old) 1318 CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1319 tx_count_old); 1320 if (rx_count != rx_count_old) 1321 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1322 rx_count_old); 1323 return -EINVAL; 1324 } 1325 1326 static u32 lio_get_msglevel(struct net_device *netdev) 1327 { 1328 struct lio *lio = GET_LIO(netdev); 1329 1330 return lio->msg_enable; 1331 } 1332 1333 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl) 1334 { 1335 struct lio *lio = GET_LIO(netdev); 1336 1337 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) { 1338 if (msglvl & NETIF_MSG_HW) 1339 liquidio_set_feature(netdev, 1340 OCTNET_CMD_VERBOSE_ENABLE, 0); 1341 else 1342 liquidio_set_feature(netdev, 1343 OCTNET_CMD_VERBOSE_DISABLE, 0); 1344 } 1345 1346 lio->msg_enable = msglvl; 1347 } 1348 1349 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl) 1350 { 1351 struct lio *lio = GET_LIO(netdev); 1352 1353 lio->msg_enable = msglvl; 1354 } 1355 1356 static void 1357 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 1358 { 1359 /* Notes: Not supporting any auto negotiation in these 1360 * drivers. Just report pause frame support. 1361 */ 1362 struct lio *lio = GET_LIO(netdev); 1363 struct octeon_device *oct = lio->oct_dev; 1364 1365 pause->autoneg = 0; 1366 1367 pause->tx_pause = oct->tx_pause; 1368 pause->rx_pause = oct->rx_pause; 1369 } 1370 1371 static int 1372 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 1373 { 1374 /* Notes: Not supporting any auto negotiation in these 1375 * drivers. 1376 */ 1377 struct lio *lio = GET_LIO(netdev); 1378 struct octeon_device *oct = lio->oct_dev; 1379 struct octnic_ctrl_pkt nctrl; 1380 struct oct_link_info *linfo = &lio->linfo; 1381 1382 int ret = 0; 1383 1384 if (oct->chip_id != OCTEON_CN23XX_PF_VID) 1385 return -EINVAL; 1386 1387 if (linfo->link.s.duplex == 0) { 1388 /*no flow control for half duplex*/ 1389 if (pause->rx_pause || pause->tx_pause) 1390 return -EINVAL; 1391 } 1392 1393 /*do not support autoneg of link flow control*/ 1394 if (pause->autoneg == AUTONEG_ENABLE) 1395 return -EINVAL; 1396 1397 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1398 1399 nctrl.ncmd.u64 = 0; 1400 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL; 1401 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1402 nctrl.wait_time = 100; 1403 nctrl.netpndev = (u64)netdev; 1404 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1405 1406 if (pause->rx_pause) { 1407 /*enable rx pause*/ 1408 nctrl.ncmd.s.param1 = 1; 1409 } else { 1410 /*disable rx pause*/ 1411 nctrl.ncmd.s.param1 = 0; 1412 } 1413 1414 if (pause->tx_pause) { 1415 /*enable tx pause*/ 1416 nctrl.ncmd.s.param2 = 1; 1417 } else { 1418 /*disable tx pause*/ 1419 nctrl.ncmd.s.param2 = 0; 1420 } 1421 1422 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1423 if (ret < 0) { 1424 dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n"); 1425 return -EINVAL; 1426 } 1427 1428 oct->rx_pause = pause->rx_pause; 1429 oct->tx_pause = pause->tx_pause; 1430 1431 return 0; 1432 } 1433 1434 static void 1435 lio_get_ethtool_stats(struct net_device *netdev, 1436 struct ethtool_stats *stats __attribute__((unused)), 1437 u64 *data) 1438 { 1439 struct lio *lio = GET_LIO(netdev); 1440 struct octeon_device *oct_dev = lio->oct_dev; 1441 struct rtnl_link_stats64 lstats; 1442 int i = 0, j; 1443 1444 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1445 return; 1446 1447 netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); 1448 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ 1449 data[i++] = lstats.rx_packets; 1450 /*sum of oct->instr_queue[iq_no]->stats.tx_done */ 1451 data[i++] = lstats.tx_packets; 1452 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1453 data[i++] = lstats.rx_bytes; 1454 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1455 data[i++] = lstats.tx_bytes; 1456 data[i++] = lstats.rx_errors + 1457 oct_dev->link_stats.fromwire.fcs_err + 1458 oct_dev->link_stats.fromwire.jabber_err + 1459 oct_dev->link_stats.fromwire.l2_err + 1460 oct_dev->link_stats.fromwire.frame_err; 1461 data[i++] = lstats.tx_errors; 1462 /*sum of oct->droq[oq_no]->stats->rx_dropped + 1463 *oct->droq[oq_no]->stats->dropped_nodispatch + 1464 *oct->droq[oq_no]->stats->dropped_toomany + 1465 *oct->droq[oq_no]->stats->dropped_nomem 1466 */ 1467 data[i++] = lstats.rx_dropped + 1468 oct_dev->link_stats.fromwire.fifo_err + 1469 oct_dev->link_stats.fromwire.dmac_drop + 1470 oct_dev->link_stats.fromwire.red_drops + 1471 oct_dev->link_stats.fromwire.fw_err_pko + 1472 oct_dev->link_stats.fromwire.fw_err_link + 1473 oct_dev->link_stats.fromwire.fw_err_drop; 1474 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1475 data[i++] = lstats.tx_dropped + 1476 oct_dev->link_stats.fromhost.max_collision_fail + 1477 oct_dev->link_stats.fromhost.max_deferral_fail + 1478 oct_dev->link_stats.fromhost.total_collisions + 1479 oct_dev->link_stats.fromhost.fw_err_pko + 1480 oct_dev->link_stats.fromhost.fw_err_link + 1481 oct_dev->link_stats.fromhost.fw_err_drop + 1482 oct_dev->link_stats.fromhost.fw_err_pki; 1483 1484 /* firmware tx stats */ 1485 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 1486 *fromhost.fw_total_sent 1487 */ 1488 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); 1489 /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ 1490 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); 1491 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ 1492 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); 1493 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */ 1494 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki); 1495 /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ 1496 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); 1497 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1498 *fw_err_drop 1499 */ 1500 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); 1501 1502 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ 1503 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); 1504 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1505 *fw_tso_fwd 1506 */ 1507 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); 1508 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1509 *fw_err_tso 1510 */ 1511 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); 1512 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 1513 *fw_tx_vxlan 1514 */ 1515 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); 1516 1517 /* Multicast packets sent by this port */ 1518 data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; 1519 data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; 1520 1521 /* mac tx statistics */ 1522 /*CVMX_BGXX_CMRX_TX_STAT5 */ 1523 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); 1524 /*CVMX_BGXX_CMRX_TX_STAT4 */ 1525 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); 1526 /*CVMX_BGXX_CMRX_TX_STAT15 */ 1527 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); 1528 /*CVMX_BGXX_CMRX_TX_STAT14 */ 1529 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); 1530 /*CVMX_BGXX_CMRX_TX_STAT17 */ 1531 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); 1532 /*CVMX_BGXX_CMRX_TX_STAT0 */ 1533 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); 1534 /*CVMX_BGXX_CMRX_TX_STAT3 */ 1535 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); 1536 /*CVMX_BGXX_CMRX_TX_STAT2 */ 1537 data[i++] = 1538 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); 1539 /*CVMX_BGXX_CMRX_TX_STAT0 */ 1540 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); 1541 /*CVMX_BGXX_CMRX_TX_STAT1 */ 1542 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); 1543 /*CVMX_BGXX_CMRX_TX_STAT16 */ 1544 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); 1545 /*CVMX_BGXX_CMRX_TX_STAT6 */ 1546 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); 1547 1548 /* RX firmware stats */ 1549 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1550 *fw_total_rcvd 1551 */ 1552 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); 1553 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1554 *fw_total_fwd 1555 */ 1556 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); 1557 /* Multicast packets received on this port */ 1558 data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; 1559 data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; 1560 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ 1561 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); 1562 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ 1563 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); 1564 /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ 1565 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); 1566 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1567 *fw_err_pko 1568 */ 1569 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); 1570 /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ 1571 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); 1572 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1573 *fromwire.fw_err_drop 1574 */ 1575 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); 1576 1577 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1578 *fromwire.fw_rx_vxlan 1579 */ 1580 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan); 1581 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 1582 *fromwire.fw_rx_vxlan_err 1583 */ 1584 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err); 1585 1586 /* LRO */ 1587 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1588 *fw_lro_pkts 1589 */ 1590 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); 1591 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1592 *fw_lro_octs 1593 */ 1594 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); 1595 /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ 1596 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); 1597 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 1598 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); 1599 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1600 *fw_lro_aborts_port 1601 */ 1602 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port); 1603 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1604 *fw_lro_aborts_seq 1605 */ 1606 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq); 1607 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1608 *fw_lro_aborts_tsval 1609 */ 1610 data[i++] = 1611 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval); 1612 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 1613 *fw_lro_aborts_timer 1614 */ 1615 /* intrmod: packet forward rate */ 1616 data[i++] = 1617 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer); 1618 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 1619 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate); 1620 1621 /* mac: link-level stats */ 1622 /*CVMX_BGXX_CMRX_RX_STAT0 */ 1623 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd); 1624 /*CVMX_BGXX_CMRX_RX_STAT1 */ 1625 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd); 1626 /*CVMX_PKI_STATX_STAT5 */ 1627 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst); 1628 /*CVMX_PKI_STATX_STAT5 */ 1629 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst); 1630 /*wqe->word2.err_code or wqe->word2.err_level */ 1631 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts); 1632 /*CVMX_BGXX_CMRX_RX_STAT2 */ 1633 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd); 1634 /*CVMX_BGXX_CMRX_RX_STAT6 */ 1635 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err); 1636 /*CVMX_BGXX_CMRX_RX_STAT4 */ 1637 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop); 1638 /*wqe->word2.err_code or wqe->word2.err_level */ 1639 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err); 1640 /*lio->link_changes*/ 1641 data[i++] = CVM_CAST64(lio->link_changes); 1642 1643 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { 1644 if (!(oct_dev->io_qmask.iq & BIT_ULL(j))) 1645 continue; 1646 /*packets to network port*/ 1647 /*# of packets tx to network */ 1648 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 1649 /*# of bytes tx to network */ 1650 data[i++] = 1651 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); 1652 /*# of packets dropped */ 1653 data[i++] = 1654 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); 1655 /*# of tx fails due to queue full */ 1656 data[i++] = 1657 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); 1658 /*XXX gather entries sent */ 1659 data[i++] = 1660 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); 1661 1662 /*instruction to firmware: data and control */ 1663 /*# of instructions to the queue */ 1664 data[i++] = 1665 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); 1666 /*# of instructions processed */ 1667 data[i++] = CVM_CAST64( 1668 oct_dev->instr_queue[j]->stats.instr_processed); 1669 /*# of instructions could not be processed */ 1670 data[i++] = CVM_CAST64( 1671 oct_dev->instr_queue[j]->stats.instr_dropped); 1672 /*bytes sent through the queue */ 1673 data[i++] = 1674 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); 1675 1676 /*tso request*/ 1677 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 1678 /*vxlan request*/ 1679 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 1680 /*txq restart*/ 1681 data[i++] = 1682 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); 1683 } 1684 1685 /* RX */ 1686 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) { 1687 if (!(oct_dev->io_qmask.oq & BIT_ULL(j))) 1688 continue; 1689 1690 /*packets send to TCP/IP network stack */ 1691 /*# of packets to network stack */ 1692 data[i++] = 1693 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); 1694 /*# of bytes to network stack */ 1695 data[i++] = 1696 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); 1697 /*# of packets dropped */ 1698 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + 1699 oct_dev->droq[j]->stats.dropped_toomany + 1700 oct_dev->droq[j]->stats.rx_dropped); 1701 data[i++] = 1702 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); 1703 data[i++] = 1704 CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); 1705 data[i++] = 1706 CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 1707 1708 /*control and data path*/ 1709 data[i++] = 1710 CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 1711 data[i++] = 1712 CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 1713 data[i++] = 1714 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 1715 1716 data[i++] = 1717 CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); 1718 data[i++] = 1719 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 1720 } 1721 } 1722 1723 static void lio_vf_get_ethtool_stats(struct net_device *netdev, 1724 struct ethtool_stats *stats 1725 __attribute__((unused)), 1726 u64 *data) 1727 { 1728 struct rtnl_link_stats64 lstats; 1729 struct lio *lio = GET_LIO(netdev); 1730 struct octeon_device *oct_dev = lio->oct_dev; 1731 int i = 0, j, vj; 1732 1733 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1734 return; 1735 1736 netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); 1737 /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ 1738 data[i++] = lstats.rx_packets; 1739 /* sum of oct->instr_queue[iq_no]->stats.tx_done */ 1740 data[i++] = lstats.tx_packets; 1741 /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1742 data[i++] = lstats.rx_bytes; 1743 /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1744 data[i++] = lstats.tx_bytes; 1745 data[i++] = lstats.rx_errors; 1746 data[i++] = lstats.tx_errors; 1747 /* sum of oct->droq[oq_no]->stats->rx_dropped + 1748 * oct->droq[oq_no]->stats->dropped_nodispatch + 1749 * oct->droq[oq_no]->stats->dropped_toomany + 1750 * oct->droq[oq_no]->stats->dropped_nomem 1751 */ 1752 data[i++] = lstats.rx_dropped; 1753 /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1754 data[i++] = lstats.tx_dropped; 1755 1756 data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; 1757 data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; 1758 data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; 1759 data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; 1760 1761 /* lio->link_changes */ 1762 data[i++] = CVM_CAST64(lio->link_changes); 1763 1764 for (vj = 0; vj < oct_dev->num_iqs; vj++) { 1765 j = lio->linfo.txpciq[vj].s.q_no; 1766 1767 /* packets to network port */ 1768 /* # of packets tx to network */ 1769 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 1770 /* # of bytes tx to network */ 1771 data[i++] = CVM_CAST64( 1772 oct_dev->instr_queue[j]->stats.tx_tot_bytes); 1773 /* # of packets dropped */ 1774 data[i++] = CVM_CAST64( 1775 oct_dev->instr_queue[j]->stats.tx_dropped); 1776 /* # of tx fails due to queue full */ 1777 data[i++] = CVM_CAST64( 1778 oct_dev->instr_queue[j]->stats.tx_iq_busy); 1779 /* XXX gather entries sent */ 1780 data[i++] = CVM_CAST64( 1781 oct_dev->instr_queue[j]->stats.sgentry_sent); 1782 1783 /* instruction to firmware: data and control */ 1784 /* # of instructions to the queue */ 1785 data[i++] = CVM_CAST64( 1786 oct_dev->instr_queue[j]->stats.instr_posted); 1787 /* # of instructions processed */ 1788 data[i++] = 1789 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed); 1790 /* # of instructions could not be processed */ 1791 data[i++] = 1792 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped); 1793 /* bytes sent through the queue */ 1794 data[i++] = CVM_CAST64( 1795 oct_dev->instr_queue[j]->stats.bytes_sent); 1796 /* tso request */ 1797 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 1798 /* vxlan request */ 1799 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 1800 /* txq restart */ 1801 data[i++] = CVM_CAST64( 1802 oct_dev->instr_queue[j]->stats.tx_restart); 1803 } 1804 1805 /* RX */ 1806 for (vj = 0; vj < oct_dev->num_oqs; vj++) { 1807 j = lio->linfo.rxpciq[vj].s.q_no; 1808 1809 /* packets send to TCP/IP network stack */ 1810 /* # of packets to network stack */ 1811 data[i++] = CVM_CAST64( 1812 oct_dev->droq[j]->stats.rx_pkts_received); 1813 /* # of bytes to network stack */ 1814 data[i++] = CVM_CAST64( 1815 oct_dev->droq[j]->stats.rx_bytes_received); 1816 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + 1817 oct_dev->droq[j]->stats.dropped_toomany + 1818 oct_dev->droq[j]->stats.rx_dropped); 1819 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); 1820 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); 1821 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 1822 1823 /* control and data path */ 1824 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 1825 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 1826 data[i++] = 1827 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 1828 1829 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); 1830 data[i++] = 1831 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 1832 } 1833 } 1834 1835 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data) 1836 { 1837 struct octeon_device *oct_dev = lio->oct_dev; 1838 int i; 1839 1840 switch (oct_dev->chip_id) { 1841 case OCTEON_CN23XX_PF_VID: 1842 case OCTEON_CN23XX_VF_VID: 1843 for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) { 1844 sprintf(data, "%s", oct_priv_flags_strings[i]); 1845 data += ETH_GSTRING_LEN; 1846 } 1847 break; 1848 case OCTEON_CN68XX: 1849 case OCTEON_CN66XX: 1850 break; 1851 default: 1852 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 1853 break; 1854 } 1855 } 1856 1857 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 1858 { 1859 struct lio *lio = GET_LIO(netdev); 1860 struct octeon_device *oct_dev = lio->oct_dev; 1861 int num_iq_stats, num_oq_stats, i, j; 1862 int num_stats; 1863 1864 switch (stringset) { 1865 case ETH_SS_STATS: 1866 num_stats = ARRAY_SIZE(oct_stats_strings); 1867 for (j = 0; j < num_stats; j++) { 1868 sprintf(data, "%s", oct_stats_strings[j]); 1869 data += ETH_GSTRING_LEN; 1870 } 1871 1872 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 1873 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { 1874 if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) 1875 continue; 1876 for (j = 0; j < num_iq_stats; j++) { 1877 sprintf(data, "tx-%d-%s", i, 1878 oct_iq_stats_strings[j]); 1879 data += ETH_GSTRING_LEN; 1880 } 1881 } 1882 1883 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 1884 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { 1885 if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) 1886 continue; 1887 for (j = 0; j < num_oq_stats; j++) { 1888 sprintf(data, "rx-%d-%s", i, 1889 oct_droq_stats_strings[j]); 1890 data += ETH_GSTRING_LEN; 1891 } 1892 } 1893 break; 1894 1895 case ETH_SS_PRIV_FLAGS: 1896 lio_get_priv_flags_strings(lio, data); 1897 break; 1898 default: 1899 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); 1900 break; 1901 } 1902 } 1903 1904 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset, 1905 u8 *data) 1906 { 1907 int num_iq_stats, num_oq_stats, i, j; 1908 struct lio *lio = GET_LIO(netdev); 1909 struct octeon_device *oct_dev = lio->oct_dev; 1910 int num_stats; 1911 1912 switch (stringset) { 1913 case ETH_SS_STATS: 1914 num_stats = ARRAY_SIZE(oct_vf_stats_strings); 1915 for (j = 0; j < num_stats; j++) { 1916 sprintf(data, "%s", oct_vf_stats_strings[j]); 1917 data += ETH_GSTRING_LEN; 1918 } 1919 1920 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 1921 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { 1922 if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) 1923 continue; 1924 for (j = 0; j < num_iq_stats; j++) { 1925 sprintf(data, "tx-%d-%s", i, 1926 oct_iq_stats_strings[j]); 1927 data += ETH_GSTRING_LEN; 1928 } 1929 } 1930 1931 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 1932 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { 1933 if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) 1934 continue; 1935 for (j = 0; j < num_oq_stats; j++) { 1936 sprintf(data, "rx-%d-%s", i, 1937 oct_droq_stats_strings[j]); 1938 data += ETH_GSTRING_LEN; 1939 } 1940 } 1941 break; 1942 1943 case ETH_SS_PRIV_FLAGS: 1944 lio_get_priv_flags_strings(lio, data); 1945 break; 1946 default: 1947 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); 1948 break; 1949 } 1950 } 1951 1952 static int lio_get_priv_flags_ss_count(struct lio *lio) 1953 { 1954 struct octeon_device *oct_dev = lio->oct_dev; 1955 1956 switch (oct_dev->chip_id) { 1957 case OCTEON_CN23XX_PF_VID: 1958 case OCTEON_CN23XX_VF_VID: 1959 return ARRAY_SIZE(oct_priv_flags_strings); 1960 case OCTEON_CN68XX: 1961 case OCTEON_CN66XX: 1962 return -EOPNOTSUPP; 1963 default: 1964 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 1965 return -EOPNOTSUPP; 1966 } 1967 } 1968 1969 static int lio_get_sset_count(struct net_device *netdev, int sset) 1970 { 1971 struct lio *lio = GET_LIO(netdev); 1972 struct octeon_device *oct_dev = lio->oct_dev; 1973 1974 switch (sset) { 1975 case ETH_SS_STATS: 1976 return (ARRAY_SIZE(oct_stats_strings) + 1977 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + 1978 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); 1979 case ETH_SS_PRIV_FLAGS: 1980 return lio_get_priv_flags_ss_count(lio); 1981 default: 1982 return -EOPNOTSUPP; 1983 } 1984 } 1985 1986 static int lio_vf_get_sset_count(struct net_device *netdev, int sset) 1987 { 1988 struct lio *lio = GET_LIO(netdev); 1989 struct octeon_device *oct_dev = lio->oct_dev; 1990 1991 switch (sset) { 1992 case ETH_SS_STATS: 1993 return (ARRAY_SIZE(oct_vf_stats_strings) + 1994 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + 1995 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); 1996 case ETH_SS_PRIV_FLAGS: 1997 return lio_get_priv_flags_ss_count(lio); 1998 default: 1999 return -EOPNOTSUPP; 2000 } 2001 } 2002 2003 /* Callback function for intrmod */ 2004 static void octnet_intrmod_callback(struct octeon_device *oct_dev, 2005 u32 status, 2006 void *ptr) 2007 { 2008 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; 2009 struct oct_intrmod_context *ctx; 2010 2011 ctx = (struct oct_intrmod_context *)sc->ctxptr; 2012 2013 ctx->status = status; 2014 2015 WRITE_ONCE(ctx->cond, 1); 2016 2017 /* This barrier is required to be sure that the response has been 2018 * written fully before waking up the handler 2019 */ 2020 wmb(); 2021 2022 wake_up_interruptible(&ctx->wc); 2023 } 2024 2025 /* get interrupt moderation parameters */ 2026 static int octnet_get_intrmod_cfg(struct lio *lio, 2027 struct oct_intrmod_cfg *intr_cfg) 2028 { 2029 struct octeon_soft_command *sc; 2030 struct oct_intrmod_context *ctx; 2031 struct oct_intrmod_resp *resp; 2032 int retval; 2033 struct octeon_device *oct_dev = lio->oct_dev; 2034 2035 /* Alloc soft command */ 2036 sc = (struct octeon_soft_command *) 2037 octeon_alloc_soft_command(oct_dev, 2038 0, 2039 sizeof(struct oct_intrmod_resp), 2040 sizeof(struct oct_intrmod_context)); 2041 2042 if (!sc) 2043 return -ENOMEM; 2044 2045 resp = (struct oct_intrmod_resp *)sc->virtrptr; 2046 memset(resp, 0, sizeof(struct oct_intrmod_resp)); 2047 2048 ctx = (struct oct_intrmod_context *)sc->ctxptr; 2049 memset(ctx, 0, sizeof(struct oct_intrmod_context)); 2050 WRITE_ONCE(ctx->cond, 0); 2051 ctx->octeon_id = lio_get_device_id(oct_dev); 2052 init_waitqueue_head(&ctx->wc); 2053 2054 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 2055 2056 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, 2057 OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); 2058 2059 sc->callback = octnet_intrmod_callback; 2060 sc->callback_arg = sc; 2061 sc->wait_time = 1000; 2062 2063 retval = octeon_send_soft_command(oct_dev, sc); 2064 if (retval == IQ_SEND_FAILED) { 2065 octeon_free_soft_command(oct_dev, sc); 2066 return -EINVAL; 2067 } 2068 2069 /* Sleep on a wait queue till the cond flag indicates that the 2070 * response arrived or timed-out. 2071 */ 2072 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 2073 dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n"); 2074 goto intrmod_info_wait_intr; 2075 } 2076 2077 retval = ctx->status || resp->status; 2078 if (retval) { 2079 dev_err(&oct_dev->pci_dev->dev, 2080 "Get interrupt moderation parameters failed\n"); 2081 goto intrmod_info_wait_fail; 2082 } 2083 2084 octeon_swap_8B_data((u64 *)&resp->intrmod, 2085 (sizeof(struct oct_intrmod_cfg)) / 8); 2086 memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg)); 2087 octeon_free_soft_command(oct_dev, sc); 2088 2089 return 0; 2090 2091 intrmod_info_wait_fail: 2092 2093 octeon_free_soft_command(oct_dev, sc); 2094 2095 intrmod_info_wait_intr: 2096 2097 return -ENODEV; 2098 } 2099 2100 /* Configure interrupt moderation parameters */ 2101 static int octnet_set_intrmod_cfg(struct lio *lio, 2102 struct oct_intrmod_cfg *intr_cfg) 2103 { 2104 struct octeon_soft_command *sc; 2105 struct oct_intrmod_context *ctx; 2106 struct oct_intrmod_cfg *cfg; 2107 int retval; 2108 struct octeon_device *oct_dev = lio->oct_dev; 2109 2110 /* Alloc soft command */ 2111 sc = (struct octeon_soft_command *) 2112 octeon_alloc_soft_command(oct_dev, 2113 sizeof(struct oct_intrmod_cfg), 2114 0, 2115 sizeof(struct oct_intrmod_context)); 2116 2117 if (!sc) 2118 return -ENOMEM; 2119 2120 ctx = (struct oct_intrmod_context *)sc->ctxptr; 2121 2122 WRITE_ONCE(ctx->cond, 0); 2123 ctx->octeon_id = lio_get_device_id(oct_dev); 2124 init_waitqueue_head(&ctx->wc); 2125 2126 cfg = (struct oct_intrmod_cfg *)sc->virtdptr; 2127 2128 memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); 2129 octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8); 2130 2131 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 2132 2133 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, 2134 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); 2135 2136 sc->callback = octnet_intrmod_callback; 2137 sc->callback_arg = sc; 2138 sc->wait_time = 1000; 2139 2140 retval = octeon_send_soft_command(oct_dev, sc); 2141 if (retval == IQ_SEND_FAILED) { 2142 octeon_free_soft_command(oct_dev, sc); 2143 return -EINVAL; 2144 } 2145 2146 /* Sleep on a wait queue till the cond flag indicates that the 2147 * response arrived or timed-out. 2148 */ 2149 if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) { 2150 retval = ctx->status; 2151 if (retval) 2152 dev_err(&oct_dev->pci_dev->dev, 2153 "intrmod config failed. Status: %llx\n", 2154 CVM_CAST64(retval)); 2155 else 2156 dev_info(&oct_dev->pci_dev->dev, 2157 "Rx-Adaptive Interrupt moderation %s\n", 2158 (intr_cfg->rx_enable) ? 2159 "enabled" : "disabled"); 2160 2161 octeon_free_soft_command(oct_dev, sc); 2162 2163 return ((retval) ? -ENODEV : 0); 2164 } 2165 2166 dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n"); 2167 2168 return -EINTR; 2169 } 2170 2171 static int lio_get_intr_coalesce(struct net_device *netdev, 2172 struct ethtool_coalesce *intr_coal) 2173 { 2174 struct lio *lio = GET_LIO(netdev); 2175 struct octeon_device *oct = lio->oct_dev; 2176 struct octeon_instr_queue *iq; 2177 struct oct_intrmod_cfg intrmod_cfg; 2178 2179 if (octnet_get_intrmod_cfg(lio, &intrmod_cfg)) 2180 return -ENODEV; 2181 2182 switch (oct->chip_id) { 2183 case OCTEON_CN23XX_PF_VID: 2184 case OCTEON_CN23XX_VF_VID: { 2185 if (!intrmod_cfg.rx_enable) { 2186 intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs; 2187 intr_coal->rx_max_coalesced_frames = 2188 oct->rx_max_coalesced_frames; 2189 } 2190 if (!intrmod_cfg.tx_enable) 2191 intr_coal->tx_max_coalesced_frames = 2192 oct->tx_max_coalesced_frames; 2193 break; 2194 } 2195 case OCTEON_CN68XX: 2196 case OCTEON_CN66XX: { 2197 struct octeon_cn6xxx *cn6xxx = 2198 (struct octeon_cn6xxx *)oct->chip; 2199 2200 if (!intrmod_cfg.rx_enable) { 2201 intr_coal->rx_coalesce_usecs = 2202 CFG_GET_OQ_INTR_TIME(cn6xxx->conf); 2203 intr_coal->rx_max_coalesced_frames = 2204 CFG_GET_OQ_INTR_PKT(cn6xxx->conf); 2205 } 2206 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; 2207 intr_coal->tx_max_coalesced_frames = iq->fill_threshold; 2208 break; 2209 } 2210 default: 2211 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 2212 return -EINVAL; 2213 } 2214 if (intrmod_cfg.rx_enable) { 2215 intr_coal->use_adaptive_rx_coalesce = 2216 intrmod_cfg.rx_enable; 2217 intr_coal->rate_sample_interval = 2218 intrmod_cfg.check_intrvl; 2219 intr_coal->pkt_rate_high = 2220 intrmod_cfg.maxpkt_ratethr; 2221 intr_coal->pkt_rate_low = 2222 intrmod_cfg.minpkt_ratethr; 2223 intr_coal->rx_max_coalesced_frames_high = 2224 intrmod_cfg.rx_maxcnt_trigger; 2225 intr_coal->rx_coalesce_usecs_high = 2226 intrmod_cfg.rx_maxtmr_trigger; 2227 intr_coal->rx_coalesce_usecs_low = 2228 intrmod_cfg.rx_mintmr_trigger; 2229 intr_coal->rx_max_coalesced_frames_low = 2230 intrmod_cfg.rx_mincnt_trigger; 2231 } 2232 if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && 2233 (intrmod_cfg.tx_enable)) { 2234 intr_coal->use_adaptive_tx_coalesce = 2235 intrmod_cfg.tx_enable; 2236 intr_coal->tx_max_coalesced_frames_high = 2237 intrmod_cfg.tx_maxcnt_trigger; 2238 intr_coal->tx_max_coalesced_frames_low = 2239 intrmod_cfg.tx_mincnt_trigger; 2240 } 2241 return 0; 2242 } 2243 2244 /* Enable/Disable auto interrupt Moderation */ 2245 static int oct_cfg_adaptive_intr(struct lio *lio, 2246 struct oct_intrmod_cfg *intrmod_cfg, 2247 struct ethtool_coalesce *intr_coal) 2248 { 2249 int ret = 0; 2250 2251 if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) { 2252 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; 2253 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; 2254 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; 2255 } 2256 if (intrmod_cfg->rx_enable) { 2257 intrmod_cfg->rx_maxcnt_trigger = 2258 intr_coal->rx_max_coalesced_frames_high; 2259 intrmod_cfg->rx_maxtmr_trigger = 2260 intr_coal->rx_coalesce_usecs_high; 2261 intrmod_cfg->rx_mintmr_trigger = 2262 intr_coal->rx_coalesce_usecs_low; 2263 intrmod_cfg->rx_mincnt_trigger = 2264 intr_coal->rx_max_coalesced_frames_low; 2265 } 2266 if (intrmod_cfg->tx_enable) { 2267 intrmod_cfg->tx_maxcnt_trigger = 2268 intr_coal->tx_max_coalesced_frames_high; 2269 intrmod_cfg->tx_mincnt_trigger = 2270 intr_coal->tx_max_coalesced_frames_low; 2271 } 2272 2273 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); 2274 2275 return ret; 2276 } 2277 2278 static int 2279 oct_cfg_rx_intrcnt(struct lio *lio, 2280 struct oct_intrmod_cfg *intrmod, 2281 struct ethtool_coalesce *intr_coal) 2282 { 2283 struct octeon_device *oct = lio->oct_dev; 2284 u32 rx_max_coalesced_frames; 2285 2286 /* Config Cnt based interrupt values */ 2287 switch (oct->chip_id) { 2288 case OCTEON_CN68XX: 2289 case OCTEON_CN66XX: { 2290 struct octeon_cn6xxx *cn6xxx = 2291 (struct octeon_cn6xxx *)oct->chip; 2292 2293 if (!intr_coal->rx_max_coalesced_frames) 2294 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; 2295 else 2296 rx_max_coalesced_frames = 2297 intr_coal->rx_max_coalesced_frames; 2298 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, 2299 rx_max_coalesced_frames); 2300 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); 2301 break; 2302 } 2303 case OCTEON_CN23XX_PF_VID: { 2304 int q_no; 2305 2306 if (!intr_coal->rx_max_coalesced_frames) 2307 rx_max_coalesced_frames = intrmod->rx_frames; 2308 else 2309 rx_max_coalesced_frames = 2310 intr_coal->rx_max_coalesced_frames; 2311 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2312 q_no += oct->sriov_info.pf_srn; 2313 octeon_write_csr64( 2314 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 2315 (octeon_read_csr64( 2316 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & 2317 (0x3fffff00000000UL)) | 2318 (rx_max_coalesced_frames - 1)); 2319 /*consider setting resend bit*/ 2320 } 2321 intrmod->rx_frames = rx_max_coalesced_frames; 2322 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 2323 break; 2324 } 2325 case OCTEON_CN23XX_VF_VID: { 2326 int q_no; 2327 2328 if (!intr_coal->rx_max_coalesced_frames) 2329 rx_max_coalesced_frames = intrmod->rx_frames; 2330 else 2331 rx_max_coalesced_frames = 2332 intr_coal->rx_max_coalesced_frames; 2333 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2334 octeon_write_csr64( 2335 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 2336 (octeon_read_csr64( 2337 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & 2338 (0x3fffff00000000UL)) | 2339 (rx_max_coalesced_frames - 1)); 2340 /*consider writing to resend bit here*/ 2341 } 2342 intrmod->rx_frames = rx_max_coalesced_frames; 2343 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 2344 break; 2345 } 2346 default: 2347 return -EINVAL; 2348 } 2349 return 0; 2350 } 2351 2352 static int oct_cfg_rx_intrtime(struct lio *lio, 2353 struct oct_intrmod_cfg *intrmod, 2354 struct ethtool_coalesce *intr_coal) 2355 { 2356 struct octeon_device *oct = lio->oct_dev; 2357 u32 time_threshold, rx_coalesce_usecs; 2358 2359 /* Config Time based interrupt values */ 2360 switch (oct->chip_id) { 2361 case OCTEON_CN68XX: 2362 case OCTEON_CN66XX: { 2363 struct octeon_cn6xxx *cn6xxx = 2364 (struct octeon_cn6xxx *)oct->chip; 2365 if (!intr_coal->rx_coalesce_usecs) 2366 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; 2367 else 2368 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2369 2370 time_threshold = lio_cn6xxx_get_oq_ticks(oct, 2371 rx_coalesce_usecs); 2372 octeon_write_csr(oct, 2373 CN6XXX_SLI_OQ_INT_LEVEL_TIME, 2374 time_threshold); 2375 2376 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); 2377 break; 2378 } 2379 case OCTEON_CN23XX_PF_VID: { 2380 u64 time_threshold; 2381 int q_no; 2382 2383 if (!intr_coal->rx_coalesce_usecs) 2384 rx_coalesce_usecs = intrmod->rx_usecs; 2385 else 2386 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2387 time_threshold = 2388 cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 2389 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2390 q_no += oct->sriov_info.pf_srn; 2391 octeon_write_csr64(oct, 2392 CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 2393 (intrmod->rx_frames | 2394 ((u64)time_threshold << 32))); 2395 /*consider writing to resend bit here*/ 2396 } 2397 intrmod->rx_usecs = rx_coalesce_usecs; 2398 oct->rx_coalesce_usecs = rx_coalesce_usecs; 2399 break; 2400 } 2401 case OCTEON_CN23XX_VF_VID: { 2402 u64 time_threshold; 2403 int q_no; 2404 2405 if (!intr_coal->rx_coalesce_usecs) 2406 rx_coalesce_usecs = intrmod->rx_usecs; 2407 else 2408 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 2409 2410 time_threshold = 2411 cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 2412 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 2413 octeon_write_csr64( 2414 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 2415 (intrmod->rx_frames | 2416 ((u64)time_threshold << 32))); 2417 /*consider setting resend bit*/ 2418 } 2419 intrmod->rx_usecs = rx_coalesce_usecs; 2420 oct->rx_coalesce_usecs = rx_coalesce_usecs; 2421 break; 2422 } 2423 default: 2424 return -EINVAL; 2425 } 2426 2427 return 0; 2428 } 2429 2430 static int 2431 oct_cfg_tx_intrcnt(struct lio *lio, 2432 struct oct_intrmod_cfg *intrmod, 2433 struct ethtool_coalesce *intr_coal) 2434 { 2435 struct octeon_device *oct = lio->oct_dev; 2436 u32 iq_intr_pkt; 2437 void __iomem *inst_cnt_reg; 2438 u64 val; 2439 2440 /* Config Cnt based interrupt values */ 2441 switch (oct->chip_id) { 2442 case OCTEON_CN68XX: 2443 case OCTEON_CN66XX: 2444 break; 2445 case OCTEON_CN23XX_VF_VID: 2446 case OCTEON_CN23XX_PF_VID: { 2447 int q_no; 2448 2449 if (!intr_coal->tx_max_coalesced_frames) 2450 iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD & 2451 CN23XX_PKT_IN_DONE_WMARK_MASK; 2452 else 2453 iq_intr_pkt = intr_coal->tx_max_coalesced_frames & 2454 CN23XX_PKT_IN_DONE_WMARK_MASK; 2455 for (q_no = 0; q_no < oct->num_iqs; q_no++) { 2456 inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg; 2457 val = readq(inst_cnt_reg); 2458 /*clear wmark and count.dont want to write count back*/ 2459 val = (val & 0xFFFF000000000000ULL) | 2460 ((u64)(iq_intr_pkt - 1) 2461 << CN23XX_PKT_IN_DONE_WMARK_BIT_POS); 2462 writeq(val, inst_cnt_reg); 2463 /*consider setting resend bit*/ 2464 } 2465 intrmod->tx_frames = iq_intr_pkt; 2466 oct->tx_max_coalesced_frames = iq_intr_pkt; 2467 break; 2468 } 2469 default: 2470 return -EINVAL; 2471 } 2472 return 0; 2473 } 2474 2475 static int lio_set_intr_coalesce(struct net_device *netdev, 2476 struct ethtool_coalesce *intr_coal) 2477 { 2478 struct lio *lio = GET_LIO(netdev); 2479 int ret; 2480 struct octeon_device *oct = lio->oct_dev; 2481 struct oct_intrmod_cfg intrmod = {0}; 2482 u32 j, q_no; 2483 int db_max, db_min; 2484 2485 switch (oct->chip_id) { 2486 case OCTEON_CN68XX: 2487 case OCTEON_CN66XX: 2488 db_min = CN6XXX_DB_MIN; 2489 db_max = CN6XXX_DB_MAX; 2490 if ((intr_coal->tx_max_coalesced_frames >= db_min) && 2491 (intr_coal->tx_max_coalesced_frames <= db_max)) { 2492 for (j = 0; j < lio->linfo.num_txpciq; j++) { 2493 q_no = lio->linfo.txpciq[j].s.q_no; 2494 oct->instr_queue[q_no]->fill_threshold = 2495 intr_coal->tx_max_coalesced_frames; 2496 } 2497 } else { 2498 dev_err(&oct->pci_dev->dev, 2499 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", 2500 intr_coal->tx_max_coalesced_frames, 2501 db_min, db_max); 2502 return -EINVAL; 2503 } 2504 break; 2505 case OCTEON_CN23XX_PF_VID: 2506 case OCTEON_CN23XX_VF_VID: 2507 break; 2508 default: 2509 return -EINVAL; 2510 } 2511 2512 intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; 2513 intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0; 2514 intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2515 intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 2516 intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2517 2518 ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal); 2519 2520 if (!intr_coal->use_adaptive_rx_coalesce) { 2521 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal); 2522 if (ret) 2523 goto ret_intrmod; 2524 2525 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal); 2526 if (ret) 2527 goto ret_intrmod; 2528 } else { 2529 oct->rx_coalesce_usecs = 2530 CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 2531 oct->rx_max_coalesced_frames = 2532 CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2533 } 2534 2535 if (!intr_coal->use_adaptive_tx_coalesce) { 2536 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal); 2537 if (ret) 2538 goto ret_intrmod; 2539 } else { 2540 oct->tx_max_coalesced_frames = 2541 CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2542 } 2543 2544 return 0; 2545 ret_intrmod: 2546 return ret; 2547 } 2548 2549 static int lio_get_ts_info(struct net_device *netdev, 2550 struct ethtool_ts_info *info) 2551 { 2552 struct lio *lio = GET_LIO(netdev); 2553 2554 info->so_timestamping = 2555 #ifdef PTP_HARDWARE_TIMESTAMPING 2556 SOF_TIMESTAMPING_TX_HARDWARE | 2557 SOF_TIMESTAMPING_RX_HARDWARE | 2558 SOF_TIMESTAMPING_RAW_HARDWARE | 2559 SOF_TIMESTAMPING_TX_SOFTWARE | 2560 #endif 2561 SOF_TIMESTAMPING_RX_SOFTWARE | 2562 SOF_TIMESTAMPING_SOFTWARE; 2563 2564 if (lio->ptp_clock) 2565 info->phc_index = ptp_clock_index(lio->ptp_clock); 2566 else 2567 info->phc_index = -1; 2568 2569 #ifdef PTP_HARDWARE_TIMESTAMPING 2570 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 2571 2572 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2573 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 2574 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 2575 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 2576 #endif 2577 2578 return 0; 2579 } 2580 2581 /* Return register dump len. */ 2582 static int lio_get_regs_len(struct net_device *dev) 2583 { 2584 struct lio *lio = GET_LIO(dev); 2585 struct octeon_device *oct = lio->oct_dev; 2586 2587 switch (oct->chip_id) { 2588 case OCTEON_CN23XX_PF_VID: 2589 return OCT_ETHTOOL_REGDUMP_LEN_23XX; 2590 case OCTEON_CN23XX_VF_VID: 2591 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF; 2592 default: 2593 return OCT_ETHTOOL_REGDUMP_LEN; 2594 } 2595 } 2596 2597 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct) 2598 { 2599 u32 reg; 2600 u8 pf_num = oct->pf_num; 2601 int len = 0; 2602 int i; 2603 2604 /* PCI Window Registers */ 2605 2606 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2607 2608 /*0x29030 or 0x29040*/ 2609 reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num); 2610 len += sprintf(s + len, 2611 "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n", 2612 reg, oct->pcie_port, oct->pf_num, 2613 (u64)octeon_read_csr64(oct, reg)); 2614 2615 /*0x27080 or 0x27090*/ 2616 reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); 2617 len += 2618 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n", 2619 reg, oct->pcie_port, oct->pf_num, 2620 (u64)octeon_read_csr64(oct, reg)); 2621 2622 /*0x27000 or 0x27010*/ 2623 reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); 2624 len += 2625 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n", 2626 reg, oct->pcie_port, oct->pf_num, 2627 (u64)octeon_read_csr64(oct, reg)); 2628 2629 /*0x29120*/ 2630 reg = 0x29120; 2631 len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg, 2632 (u64)octeon_read_csr64(oct, reg)); 2633 2634 /*0x27300*/ 2635 reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2636 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2637 len += sprintf( 2638 s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg, 2639 oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg)); 2640 2641 /*0x27200*/ 2642 reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2643 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2644 len += sprintf(s + len, 2645 "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n", 2646 reg, oct->pcie_port, oct->pf_num, 2647 (u64)octeon_read_csr64(oct, reg)); 2648 2649 /*29130*/ 2650 reg = CN23XX_SLI_PKT_CNT_INT; 2651 len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg, 2652 (u64)octeon_read_csr64(oct, reg)); 2653 2654 /*0x29140*/ 2655 reg = CN23XX_SLI_PKT_TIME_INT; 2656 len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg, 2657 (u64)octeon_read_csr64(oct, reg)); 2658 2659 /*0x29160*/ 2660 reg = 0x29160; 2661 len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg, 2662 (u64)octeon_read_csr64(oct, reg)); 2663 2664 /*0x29180*/ 2665 reg = CN23XX_SLI_OQ_WMARK; 2666 len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n", 2667 reg, (u64)octeon_read_csr64(oct, reg)); 2668 2669 /*0x291E0*/ 2670 reg = CN23XX_SLI_PKT_IOQ_RING_RST; 2671 len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg, 2672 (u64)octeon_read_csr64(oct, reg)); 2673 2674 /*0x29210*/ 2675 reg = CN23XX_SLI_GBL_CONTROL; 2676 len += sprintf(s + len, 2677 "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg, 2678 (u64)octeon_read_csr64(oct, reg)); 2679 2680 /*0x29220*/ 2681 reg = 0x29220; 2682 len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n", 2683 reg, (u64)octeon_read_csr64(oct, reg)); 2684 2685 /*PF only*/ 2686 if (pf_num == 0) { 2687 /*0x29260*/ 2688 reg = CN23XX_SLI_OUT_BP_EN_W1S; 2689 len += sprintf(s + len, 2690 "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n", 2691 reg, (u64)octeon_read_csr64(oct, reg)); 2692 } else if (pf_num == 1) { 2693 /*0x29270*/ 2694 reg = CN23XX_SLI_OUT_BP_EN2_W1S; 2695 len += sprintf(s + len, 2696 "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n", 2697 reg, (u64)octeon_read_csr64(oct, reg)); 2698 } 2699 2700 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2701 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i); 2702 len += 2703 sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2704 reg, i, (u64)octeon_read_csr64(oct, reg)); 2705 } 2706 2707 /*0x10040*/ 2708 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2709 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2710 len += sprintf(s + len, 2711 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2712 reg, i, (u64)octeon_read_csr64(oct, reg)); 2713 } 2714 2715 /*0x10080*/ 2716 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2717 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i); 2718 len += sprintf(s + len, 2719 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2720 reg, i, (u64)octeon_read_csr64(oct, reg)); 2721 } 2722 2723 /*0x10090*/ 2724 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2725 reg = CN23XX_SLI_OQ_SIZE(i); 2726 len += sprintf( 2727 s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2728 reg, i, (u64)octeon_read_csr64(oct, reg)); 2729 } 2730 2731 /*0x10050*/ 2732 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2733 reg = CN23XX_SLI_OQ_PKT_CONTROL(i); 2734 len += sprintf( 2735 s + len, 2736 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2737 reg, i, (u64)octeon_read_csr64(oct, reg)); 2738 } 2739 2740 /*0x10070*/ 2741 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2742 reg = CN23XX_SLI_OQ_BASE_ADDR64(i); 2743 len += sprintf(s + len, 2744 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2745 reg, i, (u64)octeon_read_csr64(oct, reg)); 2746 } 2747 2748 /*0x100a0*/ 2749 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2750 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i); 2751 len += sprintf(s + len, 2752 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2753 reg, i, (u64)octeon_read_csr64(oct, reg)); 2754 } 2755 2756 /*0x100b0*/ 2757 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2758 reg = CN23XX_SLI_OQ_PKTS_SENT(i); 2759 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2760 reg, i, (u64)octeon_read_csr64(oct, reg)); 2761 } 2762 2763 /*0x100c0*/ 2764 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2765 reg = 0x100c0 + i * CN23XX_OQ_OFFSET; 2766 len += sprintf(s + len, 2767 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2768 reg, i, (u64)octeon_read_csr64(oct, reg)); 2769 2770 /*0x10000*/ 2771 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2772 reg = CN23XX_SLI_IQ_PKT_CONTROL64(i); 2773 len += sprintf( 2774 s + len, 2775 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2776 reg, i, (u64)octeon_read_csr64(oct, reg)); 2777 } 2778 2779 /*0x10010*/ 2780 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2781 reg = CN23XX_SLI_IQ_BASE_ADDR64(i); 2782 len += sprintf( 2783 s + len, 2784 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg, 2785 i, (u64)octeon_read_csr64(oct, reg)); 2786 } 2787 2788 /*0x10020*/ 2789 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2790 reg = CN23XX_SLI_IQ_DOORBELL(i); 2791 len += sprintf( 2792 s + len, 2793 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2794 reg, i, (u64)octeon_read_csr64(oct, reg)); 2795 } 2796 2797 /*0x10030*/ 2798 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2799 reg = CN23XX_SLI_IQ_SIZE(i); 2800 len += sprintf( 2801 s + len, 2802 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2803 reg, i, (u64)octeon_read_csr64(oct, reg)); 2804 } 2805 2806 /*0x10040*/ 2807 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) 2808 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2809 len += sprintf(s + len, 2810 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2811 reg, i, (u64)octeon_read_csr64(oct, reg)); 2812 } 2813 2814 return len; 2815 } 2816 2817 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct) 2818 { 2819 int len = 0; 2820 u32 reg; 2821 int i; 2822 2823 /* PCI Window Registers */ 2824 2825 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2826 2827 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2828 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i); 2829 len += sprintf(s + len, 2830 "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2831 reg, i, (u64)octeon_read_csr64(oct, reg)); 2832 } 2833 2834 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2835 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2836 len += sprintf(s + len, 2837 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2838 reg, i, (u64)octeon_read_csr64(oct, reg)); 2839 } 2840 2841 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2842 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i); 2843 len += sprintf(s + len, 2844 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2845 reg, i, (u64)octeon_read_csr64(oct, reg)); 2846 } 2847 2848 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2849 reg = CN23XX_VF_SLI_OQ_SIZE(i); 2850 len += sprintf(s + len, 2851 "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2852 reg, i, (u64)octeon_read_csr64(oct, reg)); 2853 } 2854 2855 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2856 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i); 2857 len += sprintf(s + len, 2858 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2859 reg, i, (u64)octeon_read_csr64(oct, reg)); 2860 } 2861 2862 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2863 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i); 2864 len += sprintf(s + len, 2865 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2866 reg, i, (u64)octeon_read_csr64(oct, reg)); 2867 } 2868 2869 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2870 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i); 2871 len += sprintf(s + len, 2872 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2873 reg, i, (u64)octeon_read_csr64(oct, reg)); 2874 } 2875 2876 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2877 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i); 2878 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2879 reg, i, (u64)octeon_read_csr64(oct, reg)); 2880 } 2881 2882 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2883 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET; 2884 len += sprintf(s + len, 2885 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2886 reg, i, (u64)octeon_read_csr64(oct, reg)); 2887 } 2888 2889 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2890 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET; 2891 len += sprintf(s + len, 2892 "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n", 2893 reg, i, (u64)octeon_read_csr64(oct, reg)); 2894 } 2895 2896 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2897 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i); 2898 len += sprintf(s + len, 2899 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2900 reg, i, (u64)octeon_read_csr64(oct, reg)); 2901 } 2902 2903 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2904 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i); 2905 len += sprintf(s + len, 2906 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", 2907 reg, i, (u64)octeon_read_csr64(oct, reg)); 2908 } 2909 2910 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2911 reg = CN23XX_VF_SLI_IQ_DOORBELL(i); 2912 len += sprintf(s + len, 2913 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2914 reg, i, (u64)octeon_read_csr64(oct, reg)); 2915 } 2916 2917 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2918 reg = CN23XX_VF_SLI_IQ_SIZE(i); 2919 len += sprintf(s + len, 2920 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2921 reg, i, (u64)octeon_read_csr64(oct, reg)); 2922 } 2923 2924 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2925 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2926 len += sprintf(s + len, 2927 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2928 reg, i, (u64)octeon_read_csr64(oct, reg)); 2929 } 2930 2931 return len; 2932 } 2933 2934 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) 2935 { 2936 u32 reg; 2937 int i, len = 0; 2938 2939 /* PCI Window Registers */ 2940 2941 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2942 reg = CN6XXX_WIN_WR_ADDR_LO; 2943 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n", 2944 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg)); 2945 reg = CN6XXX_WIN_WR_ADDR_HI; 2946 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n", 2947 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg)); 2948 reg = CN6XXX_WIN_RD_ADDR_LO; 2949 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n", 2950 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg)); 2951 reg = CN6XXX_WIN_RD_ADDR_HI; 2952 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n", 2953 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg)); 2954 reg = CN6XXX_WIN_WR_DATA_LO; 2955 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n", 2956 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg)); 2957 reg = CN6XXX_WIN_WR_DATA_HI; 2958 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n", 2959 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg)); 2960 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n", 2961 CN6XXX_WIN_WR_MASK_REG, 2962 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG)); 2963 2964 /* PCI Interrupt Register */ 2965 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n", 2966 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct, 2967 CN6XXX_SLI_INT_ENB64_PORT0)); 2968 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n", 2969 CN6XXX_SLI_INT_ENB64_PORT1, 2970 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1)); 2971 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64, 2972 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64)); 2973 2974 /* PCI Output queue registers */ 2975 for (i = 0; i < oct->num_oqs; i++) { 2976 reg = CN6XXX_SLI_OQ_PKTS_SENT(i); 2977 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n", 2978 reg, i, octeon_read_csr(oct, reg)); 2979 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i); 2980 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n", 2981 reg, i, octeon_read_csr(oct, reg)); 2982 } 2983 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS; 2984 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n", 2985 reg, octeon_read_csr(oct, reg)); 2986 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME; 2987 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n", 2988 reg, octeon_read_csr(oct, reg)); 2989 2990 /* PCI Input queue registers */ 2991 for (i = 0; i <= 3; i++) { 2992 u32 reg; 2993 2994 reg = CN6XXX_SLI_IQ_DOORBELL(i); 2995 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n", 2996 reg, i, octeon_read_csr(oct, reg)); 2997 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i); 2998 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n", 2999 reg, i, octeon_read_csr(oct, reg)); 3000 } 3001 3002 /* PCI DMA registers */ 3003 3004 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n", 3005 CN6XXX_DMA_CNT(0), 3006 octeon_read_csr(oct, CN6XXX_DMA_CNT(0))); 3007 reg = CN6XXX_DMA_PKT_INT_LEVEL(0); 3008 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n", 3009 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg)); 3010 reg = CN6XXX_DMA_TIME_INT_LEVEL(0); 3011 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n", 3012 CN6XXX_DMA_TIME_INT_LEVEL(0), 3013 octeon_read_csr(oct, reg)); 3014 3015 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n", 3016 CN6XXX_DMA_CNT(1), 3017 octeon_read_csr(oct, CN6XXX_DMA_CNT(1))); 3018 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 3019 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n", 3020 CN6XXX_DMA_PKT_INT_LEVEL(1), 3021 octeon_read_csr(oct, reg)); 3022 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 3023 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n", 3024 CN6XXX_DMA_TIME_INT_LEVEL(1), 3025 octeon_read_csr(oct, reg)); 3026 3027 /* PCI Index registers */ 3028 3029 len += sprintf(s + len, "\n"); 3030 3031 for (i = 0; i < 16; i++) { 3032 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port)); 3033 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n", 3034 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg); 3035 } 3036 3037 return len; 3038 } 3039 3040 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct) 3041 { 3042 u32 val; 3043 int i, len = 0; 3044 3045 /* PCI CONFIG Registers */ 3046 3047 len += sprintf(s + len, 3048 "\n\t Octeon Config space Registers\n\n"); 3049 3050 for (i = 0; i <= 13; i++) { 3051 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 3052 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 3053 (i * 4), i, val); 3054 } 3055 3056 for (i = 30; i <= 34; i++) { 3057 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 3058 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 3059 (i * 4), i, val); 3060 } 3061 3062 return len; 3063 } 3064 3065 /* Return register dump user app. */ 3066 static void lio_get_regs(struct net_device *dev, 3067 struct ethtool_regs *regs, void *regbuf) 3068 { 3069 struct lio *lio = GET_LIO(dev); 3070 int len = 0; 3071 struct octeon_device *oct = lio->oct_dev; 3072 3073 regs->version = OCT_ETHTOOL_REGSVER; 3074 3075 switch (oct->chip_id) { 3076 case OCTEON_CN23XX_PF_VID: 3077 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX); 3078 len += cn23xx_read_csr_reg(regbuf + len, oct); 3079 break; 3080 case OCTEON_CN23XX_VF_VID: 3081 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF); 3082 len += cn23xx_vf_read_csr_reg(regbuf + len, oct); 3083 break; 3084 case OCTEON_CN68XX: 3085 case OCTEON_CN66XX: 3086 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); 3087 len += cn6xxx_read_csr_reg(regbuf + len, oct); 3088 len += cn6xxx_read_config_reg(regbuf + len, oct); 3089 break; 3090 default: 3091 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n", 3092 __func__, oct->chip_id); 3093 } 3094 } 3095 3096 static u32 lio_get_priv_flags(struct net_device *netdev) 3097 { 3098 struct lio *lio = GET_LIO(netdev); 3099 3100 return lio->oct_dev->priv_flags; 3101 } 3102 3103 static int lio_set_priv_flags(struct net_device *netdev, u32 flags) 3104 { 3105 struct lio *lio = GET_LIO(netdev); 3106 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); 3107 3108 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, 3109 intr_by_tx_bytes); 3110 return 0; 3111 } 3112 3113 static const struct ethtool_ops lio_ethtool_ops = { 3114 .get_link_ksettings = lio_get_link_ksettings, 3115 .set_link_ksettings = lio_set_link_ksettings, 3116 .get_link = ethtool_op_get_link, 3117 .get_drvinfo = lio_get_drvinfo, 3118 .get_ringparam = lio_ethtool_get_ringparam, 3119 .set_ringparam = lio_ethtool_set_ringparam, 3120 .get_channels = lio_ethtool_get_channels, 3121 .set_channels = lio_ethtool_set_channels, 3122 .set_phys_id = lio_set_phys_id, 3123 .get_eeprom_len = lio_get_eeprom_len, 3124 .get_eeprom = lio_get_eeprom, 3125 .get_strings = lio_get_strings, 3126 .get_ethtool_stats = lio_get_ethtool_stats, 3127 .get_pauseparam = lio_get_pauseparam, 3128 .set_pauseparam = lio_set_pauseparam, 3129 .get_regs_len = lio_get_regs_len, 3130 .get_regs = lio_get_regs, 3131 .get_msglevel = lio_get_msglevel, 3132 .set_msglevel = lio_set_msglevel, 3133 .get_sset_count = lio_get_sset_count, 3134 .get_coalesce = lio_get_intr_coalesce, 3135 .set_coalesce = lio_set_intr_coalesce, 3136 .get_priv_flags = lio_get_priv_flags, 3137 .set_priv_flags = lio_set_priv_flags, 3138 .get_ts_info = lio_get_ts_info, 3139 }; 3140 3141 static const struct ethtool_ops lio_vf_ethtool_ops = { 3142 .get_link_ksettings = lio_get_link_ksettings, 3143 .get_link = ethtool_op_get_link, 3144 .get_drvinfo = lio_get_vf_drvinfo, 3145 .get_ringparam = lio_ethtool_get_ringparam, 3146 .set_ringparam = lio_ethtool_set_ringparam, 3147 .get_channels = lio_ethtool_get_channels, 3148 .set_channels = lio_ethtool_set_channels, 3149 .get_strings = lio_vf_get_strings, 3150 .get_ethtool_stats = lio_vf_get_ethtool_stats, 3151 .get_regs_len = lio_get_regs_len, 3152 .get_regs = lio_get_regs, 3153 .get_msglevel = lio_get_msglevel, 3154 .set_msglevel = lio_vf_set_msglevel, 3155 .get_sset_count = lio_vf_get_sset_count, 3156 .get_coalesce = lio_get_intr_coalesce, 3157 .set_coalesce = lio_set_intr_coalesce, 3158 .get_priv_flags = lio_get_priv_flags, 3159 .set_priv_flags = lio_set_priv_flags, 3160 .get_ts_info = lio_get_ts_info, 3161 }; 3162 3163 void liquidio_set_ethtool_ops(struct net_device *netdev) 3164 { 3165 struct lio *lio = GET_LIO(netdev); 3166 struct octeon_device *oct = lio->oct_dev; 3167 3168 if (OCTEON_CN23XX_VF(oct)) 3169 netdev->ethtool_ops = &lio_vf_ethtool_ops; 3170 else 3171 netdev->ethtool_ops = &lio_ethtool_ops; 3172 } 3173