1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/netdevice.h> 19 #include <linux/net_tstamp.h> 20 #include <linux/pci.h> 21 #include "liquidio_common.h" 22 #include "octeon_droq.h" 23 #include "octeon_iq.h" 24 #include "response_manager.h" 25 #include "octeon_device.h" 26 #include "octeon_nic.h" 27 #include "octeon_main.h" 28 #include "octeon_network.h" 29 #include "cn66xx_regs.h" 30 #include "cn66xx_device.h" 31 #include "cn23xx_pf_device.h" 32 #include "cn23xx_vf_device.h" 33 34 static int octnet_get_link_stats(struct net_device *netdev); 35 36 struct oct_intrmod_context { 37 int octeon_id; 38 wait_queue_head_t wc; 39 int cond; 40 int status; 41 }; 42 43 struct oct_intrmod_resp { 44 u64 rh; 45 struct oct_intrmod_cfg intrmod; 46 u64 status; 47 }; 48 49 struct oct_mdio_cmd_context { 50 int octeon_id; 51 wait_queue_head_t wc; 52 int cond; 53 }; 54 55 struct oct_mdio_cmd_resp { 56 u64 rh; 57 struct oct_mdio_cmd resp; 58 u64 status; 59 }; 60 61 #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp)) 62 63 /* Octeon's interface mode of operation */ 64 enum { 65 INTERFACE_MODE_DISABLED, 66 INTERFACE_MODE_RGMII, 67 INTERFACE_MODE_GMII, 68 INTERFACE_MODE_SPI, 69 INTERFACE_MODE_PCIE, 70 INTERFACE_MODE_XAUI, 71 INTERFACE_MODE_SGMII, 72 INTERFACE_MODE_PICMG, 73 INTERFACE_MODE_NPI, 74 INTERFACE_MODE_LOOP, 75 INTERFACE_MODE_SRIO, 76 INTERFACE_MODE_ILK, 77 INTERFACE_MODE_RXAUI, 78 INTERFACE_MODE_QSGMII, 79 INTERFACE_MODE_AGL, 80 INTERFACE_MODE_XLAUI, 81 INTERFACE_MODE_XFI, 82 INTERFACE_MODE_10G_KR, 83 INTERFACE_MODE_40G_KR4, 84 INTERFACE_MODE_MIXED, 85 }; 86 87 #define OCT_ETHTOOL_REGDUMP_LEN 4096 88 #define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11) 89 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2) 90 #define OCT_ETHTOOL_REGSVER 1 91 92 /* statistics of PF */ 93 static const char oct_stats_strings[][ETH_GSTRING_LEN] = { 94 "rx_packets", 95 "tx_packets", 96 "rx_bytes", 97 "tx_bytes", 98 "rx_errors", /*jabber_err+l2_err+frame_err */ 99 "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */ 100 "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd + 101 *st->fromwire.dmac_drop + st->fromwire.fw_err_drop 102 */ 103 "tx_dropped", 104 105 "tx_total_sent", 106 "tx_total_fwd", 107 "tx_err_pko", 108 "tx_err_link", 109 "tx_err_drop", 110 111 "tx_tso", 112 "tx_tso_packets", 113 "tx_tso_err", 114 "tx_vxlan", 115 116 "mac_tx_total_pkts", 117 "mac_tx_total_bytes", 118 "mac_tx_mcast_pkts", 119 "mac_tx_bcast_pkts", 120 "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */ 121 "mac_tx_total_collisions", 122 "mac_tx_one_collision", 123 "mac_tx_multi_collison", 124 "mac_tx_max_collision_fail", 125 "mac_tx_max_deferal_fail", 126 "mac_tx_fifo_err", 127 "mac_tx_runts", 128 129 "rx_total_rcvd", 130 "rx_total_fwd", 131 "rx_jabber_err", 132 "rx_l2_err", 133 "rx_frame_err", 134 "rx_err_pko", 135 "rx_err_link", 136 "rx_err_drop", 137 138 "rx_vxlan", 139 "rx_vxlan_err", 140 141 "rx_lro_pkts", 142 "rx_lro_bytes", 143 "rx_total_lro", 144 145 "rx_lro_aborts", 146 "rx_lro_aborts_port", 147 "rx_lro_aborts_seq", 148 "rx_lro_aborts_tsval", 149 "rx_lro_aborts_timer", 150 "rx_fwd_rate", 151 152 "mac_rx_total_rcvd", 153 "mac_rx_bytes", 154 "mac_rx_total_bcst", 155 "mac_rx_total_mcst", 156 "mac_rx_runts", 157 "mac_rx_ctl_packets", 158 "mac_rx_fifo_err", 159 "mac_rx_dma_drop", 160 "mac_rx_fcs_err", 161 162 "link_state_changes", 163 }; 164 165 /* statistics of VF */ 166 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { 167 "rx_packets", 168 "tx_packets", 169 "rx_bytes", 170 "tx_bytes", 171 "rx_errors", /* jabber_err + l2_err+frame_err */ 172 "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */ 173 "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */ 174 "tx_dropped", 175 "link_state_changes", 176 }; 177 178 /* statistics of host tx queue */ 179 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { 180 "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/ 181 "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/ 182 "dropped", 183 "iq_busy", 184 "sgentry_sent", 185 186 "fw_instr_posted", 187 "fw_instr_processed", 188 "fw_instr_dropped", 189 "fw_bytes_sent", 190 191 "tso", 192 "vxlan", 193 "txq_restart", 194 }; 195 196 /* statistics of host rx queue */ 197 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { 198 "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */ 199 "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */ 200 "dropped", /*oct->droq[oq_no]->stats.rx_dropped+ 201 *oct->droq[oq_no]->stats.dropped_nodispatch+ 202 *oct->droq[oq_no]->stats.dropped_toomany+ 203 *oct->droq[oq_no]->stats.dropped_nomem 204 */ 205 "dropped_nomem", 206 "dropped_toomany", 207 "fw_dropped", 208 "fw_pkts_received", 209 "fw_bytes_received", 210 "fw_dropped_nodispatch", 211 212 "vxlan", 213 "buffer_alloc_failure", 214 }; 215 216 /* LiquidIO driver private flags */ 217 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { 218 }; 219 220 #define OCTNIC_NCMD_AUTONEG_ON 0x1 221 #define OCTNIC_NCMD_PHY_ON 0x2 222 223 static int lio_get_link_ksettings(struct net_device *netdev, 224 struct ethtool_link_ksettings *ecmd) 225 { 226 struct lio *lio = GET_LIO(netdev); 227 struct octeon_device *oct = lio->oct_dev; 228 struct oct_link_info *linfo; 229 u32 supported = 0, advertising = 0; 230 231 linfo = &lio->linfo; 232 233 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || 234 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || 235 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || 236 linfo->link.s.if_mode == INTERFACE_MODE_XFI) { 237 ecmd->base.port = PORT_FIBRE; 238 239 if (linfo->link.s.speed == SPEED_10000) { 240 supported = SUPPORTED_10000baseT_Full; 241 advertising = ADVERTISED_10000baseT_Full; 242 } 243 244 supported |= SUPPORTED_FIBRE | SUPPORTED_Pause; 245 advertising |= ADVERTISED_Pause; 246 ethtool_convert_legacy_u32_to_link_mode( 247 ecmd->link_modes.supported, supported); 248 ethtool_convert_legacy_u32_to_link_mode( 249 ecmd->link_modes.advertising, advertising); 250 ecmd->base.autoneg = AUTONEG_DISABLE; 251 252 } else { 253 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", 254 linfo->link.s.if_mode); 255 } 256 257 if (linfo->link.s.link_up) { 258 ecmd->base.speed = linfo->link.s.speed; 259 ecmd->base.duplex = linfo->link.s.duplex; 260 } else { 261 ecmd->base.speed = SPEED_UNKNOWN; 262 ecmd->base.duplex = DUPLEX_UNKNOWN; 263 } 264 265 return 0; 266 } 267 268 static void 269 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 270 { 271 struct lio *lio; 272 struct octeon_device *oct; 273 274 lio = GET_LIO(netdev); 275 oct = lio->oct_dev; 276 277 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 278 strcpy(drvinfo->driver, "liquidio"); 279 strcpy(drvinfo->version, LIQUIDIO_VERSION); 280 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 281 ETHTOOL_FWVERS_LEN); 282 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 283 } 284 285 static void 286 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 287 { 288 struct octeon_device *oct; 289 struct lio *lio; 290 291 lio = GET_LIO(netdev); 292 oct = lio->oct_dev; 293 294 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 295 strcpy(drvinfo->driver, "liquidio_vf"); 296 strcpy(drvinfo->version, LIQUIDIO_VERSION); 297 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 298 ETHTOOL_FWVERS_LEN); 299 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 300 } 301 302 static void 303 lio_ethtool_get_channels(struct net_device *dev, 304 struct ethtool_channels *channel) 305 { 306 struct lio *lio = GET_LIO(dev); 307 struct octeon_device *oct = lio->oct_dev; 308 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; 309 310 if (OCTEON_CN6XXX(oct)) { 311 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 312 313 max_rx = CFG_GET_OQ_MAX_Q(conf6x); 314 max_tx = CFG_GET_IQ_MAX_Q(conf6x); 315 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); 316 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); 317 } else if (OCTEON_CN23XX_PF(oct)) { 318 319 max_rx = oct->sriov_info.num_pf_rings; 320 max_tx = oct->sriov_info.num_pf_rings; 321 rx_count = lio->linfo.num_rxpciq; 322 tx_count = lio->linfo.num_txpciq; 323 } else if (OCTEON_CN23XX_VF(oct)) { 324 max_tx = oct->sriov_info.rings_per_vf; 325 max_rx = oct->sriov_info.rings_per_vf; 326 rx_count = lio->linfo.num_rxpciq; 327 tx_count = lio->linfo.num_txpciq; 328 } 329 330 channel->max_rx = max_rx; 331 channel->max_tx = max_tx; 332 channel->rx_count = rx_count; 333 channel->tx_count = tx_count; 334 } 335 336 static int lio_get_eeprom_len(struct net_device *netdev) 337 { 338 u8 buf[128]; 339 struct lio *lio = GET_LIO(netdev); 340 struct octeon_device *oct_dev = lio->oct_dev; 341 struct octeon_board_info *board_info; 342 int len; 343 344 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 345 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n", 346 board_info->name, board_info->serial_number, 347 board_info->major, board_info->minor); 348 349 return len; 350 } 351 352 static int 353 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 354 u8 *bytes) 355 { 356 struct lio *lio = GET_LIO(netdev); 357 struct octeon_device *oct_dev = lio->oct_dev; 358 struct octeon_board_info *board_info; 359 360 if (eeprom->offset) 361 return -EINVAL; 362 363 eeprom->magic = oct_dev->pci_dev->vendor; 364 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 365 sprintf((char *)bytes, 366 "boardname:%s serialnum:%s maj:%lld min:%lld\n", 367 board_info->name, board_info->serial_number, 368 board_info->major, board_info->minor); 369 370 return 0; 371 } 372 373 static int octnet_gpio_access(struct net_device *netdev, int addr, int val) 374 { 375 struct lio *lio = GET_LIO(netdev); 376 struct octeon_device *oct = lio->oct_dev; 377 struct octnic_ctrl_pkt nctrl; 378 int ret = 0; 379 380 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 381 382 nctrl.ncmd.u64 = 0; 383 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; 384 nctrl.ncmd.s.param1 = addr; 385 nctrl.ncmd.s.param2 = val; 386 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 387 nctrl.wait_time = 100; 388 nctrl.netpndev = (u64)netdev; 389 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 390 391 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 392 if (ret < 0) { 393 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 394 return -EINVAL; 395 } 396 397 return 0; 398 } 399 400 static int octnet_id_active(struct net_device *netdev, int val) 401 { 402 struct lio *lio = GET_LIO(netdev); 403 struct octeon_device *oct = lio->oct_dev; 404 struct octnic_ctrl_pkt nctrl; 405 int ret = 0; 406 407 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 408 409 nctrl.ncmd.u64 = 0; 410 nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; 411 nctrl.ncmd.s.param1 = val; 412 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 413 nctrl.wait_time = 100; 414 nctrl.netpndev = (u64)netdev; 415 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 416 417 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 418 if (ret < 0) { 419 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 420 return -EINVAL; 421 } 422 423 return 0; 424 } 425 426 /* Callback for when mdio command response arrives 427 */ 428 static void octnet_mdio_resp_callback(struct octeon_device *oct, 429 u32 status, 430 void *buf) 431 { 432 struct oct_mdio_cmd_context *mdio_cmd_ctx; 433 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 434 435 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; 436 437 oct = lio_get_device(mdio_cmd_ctx->octeon_id); 438 if (status) { 439 dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n", 440 CVM_CAST64(status)); 441 WRITE_ONCE(mdio_cmd_ctx->cond, -1); 442 } else { 443 WRITE_ONCE(mdio_cmd_ctx->cond, 1); 444 } 445 wake_up_interruptible(&mdio_cmd_ctx->wc); 446 } 447 448 /* This routine provides PHY access routines for 449 * mdio clause45 . 450 */ 451 static int 452 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) 453 { 454 struct octeon_device *oct_dev = lio->oct_dev; 455 struct octeon_soft_command *sc; 456 struct oct_mdio_cmd_resp *mdio_cmd_rsp; 457 struct oct_mdio_cmd_context *mdio_cmd_ctx; 458 struct oct_mdio_cmd *mdio_cmd; 459 int retval = 0; 460 461 sc = (struct octeon_soft_command *) 462 octeon_alloc_soft_command(oct_dev, 463 sizeof(struct oct_mdio_cmd), 464 sizeof(struct oct_mdio_cmd_resp), 465 sizeof(struct oct_mdio_cmd_context)); 466 467 if (!sc) 468 return -ENOMEM; 469 470 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; 471 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; 472 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; 473 474 WRITE_ONCE(mdio_cmd_ctx->cond, 0); 475 mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); 476 mdio_cmd->op = op; 477 mdio_cmd->mdio_addr = loc; 478 if (op) 479 mdio_cmd->value1 = *value; 480 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); 481 482 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 483 484 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 485 0, 0, 0); 486 487 sc->wait_time = 1000; 488 sc->callback = octnet_mdio_resp_callback; 489 sc->callback_arg = sc; 490 491 init_waitqueue_head(&mdio_cmd_ctx->wc); 492 493 retval = octeon_send_soft_command(oct_dev, sc); 494 495 if (retval == IQ_SEND_FAILED) { 496 dev_err(&oct_dev->pci_dev->dev, 497 "octnet_mdio45_access instruction failed status: %x\n", 498 retval); 499 retval = -EBUSY; 500 } else { 501 /* Sleep on a wait queue till the cond flag indicates that the 502 * response arrived 503 */ 504 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond); 505 retval = mdio_cmd_rsp->status; 506 if (retval) { 507 dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n"); 508 retval = -EBUSY; 509 } else { 510 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), 511 sizeof(struct oct_mdio_cmd) / 8); 512 513 if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { 514 if (!op) 515 *value = mdio_cmd_rsp->resp.value1; 516 } else { 517 retval = -EINVAL; 518 } 519 } 520 } 521 522 octeon_free_soft_command(oct_dev, sc); 523 524 return retval; 525 } 526 527 static int lio_set_phys_id(struct net_device *netdev, 528 enum ethtool_phys_id_state state) 529 { 530 struct lio *lio = GET_LIO(netdev); 531 struct octeon_device *oct = lio->oct_dev; 532 int value, ret; 533 534 switch (state) { 535 case ETHTOOL_ID_ACTIVE: 536 if (oct->chip_id == OCTEON_CN66XX) { 537 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 538 VITESSE_PHY_GPIO_DRIVEON); 539 return 2; 540 541 } else if (oct->chip_id == OCTEON_CN68XX) { 542 /* Save the current LED settings */ 543 ret = octnet_mdio45_access(lio, 0, 544 LIO68XX_LED_BEACON_ADDR, 545 &lio->phy_beacon_val); 546 if (ret) 547 return ret; 548 549 ret = octnet_mdio45_access(lio, 0, 550 LIO68XX_LED_CTRL_ADDR, 551 &lio->led_ctrl_val); 552 if (ret) 553 return ret; 554 555 /* Configure Beacon values */ 556 value = LIO68XX_LED_BEACON_CFGON; 557 ret = octnet_mdio45_access(lio, 1, 558 LIO68XX_LED_BEACON_ADDR, 559 &value); 560 if (ret) 561 return ret; 562 563 value = LIO68XX_LED_CTRL_CFGON; 564 ret = octnet_mdio45_access(lio, 1, 565 LIO68XX_LED_CTRL_ADDR, 566 &value); 567 if (ret) 568 return ret; 569 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 570 octnet_id_active(netdev, LED_IDENTIFICATION_ON); 571 572 /* returns 0 since updates are asynchronous */ 573 return 0; 574 } else { 575 return -EINVAL; 576 } 577 break; 578 579 case ETHTOOL_ID_ON: 580 if (oct->chip_id == OCTEON_CN66XX) { 581 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 582 VITESSE_PHY_GPIO_HIGH); 583 584 } else if (oct->chip_id == OCTEON_CN68XX) { 585 return -EINVAL; 586 } else { 587 return -EINVAL; 588 } 589 break; 590 591 case ETHTOOL_ID_OFF: 592 if (oct->chip_id == OCTEON_CN66XX) 593 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 594 VITESSE_PHY_GPIO_LOW); 595 else if (oct->chip_id == OCTEON_CN68XX) 596 return -EINVAL; 597 else 598 return -EINVAL; 599 600 break; 601 602 case ETHTOOL_ID_INACTIVE: 603 if (oct->chip_id == OCTEON_CN66XX) { 604 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 605 VITESSE_PHY_GPIO_DRIVEOFF); 606 } else if (oct->chip_id == OCTEON_CN68XX) { 607 /* Restore LED settings */ 608 ret = octnet_mdio45_access(lio, 1, 609 LIO68XX_LED_CTRL_ADDR, 610 &lio->led_ctrl_val); 611 if (ret) 612 return ret; 613 614 ret = octnet_mdio45_access(lio, 1, 615 LIO68XX_LED_BEACON_ADDR, 616 &lio->phy_beacon_val); 617 if (ret) 618 return ret; 619 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 620 octnet_id_active(netdev, LED_IDENTIFICATION_OFF); 621 622 return 0; 623 } else { 624 return -EINVAL; 625 } 626 break; 627 628 default: 629 return -EINVAL; 630 } 631 632 return 0; 633 } 634 635 static void 636 lio_ethtool_get_ringparam(struct net_device *netdev, 637 struct ethtool_ringparam *ering) 638 { 639 struct lio *lio = GET_LIO(netdev); 640 struct octeon_device *oct = lio->oct_dev; 641 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, 642 rx_pending = 0; 643 644 if (OCTEON_CN6XXX(oct)) { 645 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 646 647 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS; 648 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; 649 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); 650 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); 651 } else if (OCTEON_CN23XX_PF(oct)) { 652 struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); 653 654 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; 655 rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS; 656 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx); 657 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx); 658 } 659 660 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) { 661 ering->rx_pending = 0; 662 ering->rx_max_pending = 0; 663 ering->rx_mini_pending = 0; 664 ering->rx_jumbo_pending = rx_pending; 665 ering->rx_mini_max_pending = 0; 666 ering->rx_jumbo_max_pending = rx_max_pending; 667 } else { 668 ering->rx_pending = rx_pending; 669 ering->rx_max_pending = rx_max_pending; 670 ering->rx_mini_pending = 0; 671 ering->rx_jumbo_pending = 0; 672 ering->rx_mini_max_pending = 0; 673 ering->rx_jumbo_max_pending = 0; 674 } 675 676 ering->tx_pending = tx_pending; 677 ering->tx_max_pending = tx_max_pending; 678 } 679 680 static u32 lio_get_msglevel(struct net_device *netdev) 681 { 682 struct lio *lio = GET_LIO(netdev); 683 684 return lio->msg_enable; 685 } 686 687 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl) 688 { 689 struct lio *lio = GET_LIO(netdev); 690 691 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) { 692 if (msglvl & NETIF_MSG_HW) 693 liquidio_set_feature(netdev, 694 OCTNET_CMD_VERBOSE_ENABLE, 0); 695 else 696 liquidio_set_feature(netdev, 697 OCTNET_CMD_VERBOSE_DISABLE, 0); 698 } 699 700 lio->msg_enable = msglvl; 701 } 702 703 static void 704 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 705 { 706 /* Notes: Not supporting any auto negotiation in these 707 * drivers. Just report pause frame support. 708 */ 709 struct lio *lio = GET_LIO(netdev); 710 struct octeon_device *oct = lio->oct_dev; 711 712 pause->autoneg = 0; 713 714 pause->tx_pause = oct->tx_pause; 715 pause->rx_pause = oct->rx_pause; 716 } 717 718 static int 719 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 720 { 721 /* Notes: Not supporting any auto negotiation in these 722 * drivers. 723 */ 724 struct lio *lio = GET_LIO(netdev); 725 struct octeon_device *oct = lio->oct_dev; 726 struct octnic_ctrl_pkt nctrl; 727 struct oct_link_info *linfo = &lio->linfo; 728 729 int ret = 0; 730 731 if (oct->chip_id != OCTEON_CN23XX_PF_VID) 732 return -EINVAL; 733 734 if (linfo->link.s.duplex == 0) { 735 /*no flow control for half duplex*/ 736 if (pause->rx_pause || pause->tx_pause) 737 return -EINVAL; 738 } 739 740 /*do not support autoneg of link flow control*/ 741 if (pause->autoneg == AUTONEG_ENABLE) 742 return -EINVAL; 743 744 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 745 746 nctrl.ncmd.u64 = 0; 747 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL; 748 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 749 nctrl.wait_time = 100; 750 nctrl.netpndev = (u64)netdev; 751 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 752 753 if (pause->rx_pause) { 754 /*enable rx pause*/ 755 nctrl.ncmd.s.param1 = 1; 756 } else { 757 /*disable rx pause*/ 758 nctrl.ncmd.s.param1 = 0; 759 } 760 761 if (pause->tx_pause) { 762 /*enable tx pause*/ 763 nctrl.ncmd.s.param2 = 1; 764 } else { 765 /*disable tx pause*/ 766 nctrl.ncmd.s.param2 = 0; 767 } 768 769 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 770 if (ret < 0) { 771 dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n"); 772 return -EINVAL; 773 } 774 775 oct->rx_pause = pause->rx_pause; 776 oct->tx_pause = pause->tx_pause; 777 778 return 0; 779 } 780 781 static void 782 lio_get_ethtool_stats(struct net_device *netdev, 783 struct ethtool_stats *stats __attribute__((unused)), 784 u64 *data) 785 { 786 struct lio *lio = GET_LIO(netdev); 787 struct octeon_device *oct_dev = lio->oct_dev; 788 struct net_device_stats *netstats = &netdev->stats; 789 int i = 0, j; 790 791 netdev->netdev_ops->ndo_get_stats(netdev); 792 octnet_get_link_stats(netdev); 793 794 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ 795 data[i++] = CVM_CAST64(netstats->rx_packets); 796 /*sum of oct->instr_queue[iq_no]->stats.tx_done */ 797 data[i++] = CVM_CAST64(netstats->tx_packets); 798 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ 799 data[i++] = CVM_CAST64(netstats->rx_bytes); 800 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 801 data[i++] = CVM_CAST64(netstats->tx_bytes); 802 data[i++] = CVM_CAST64(netstats->rx_errors); 803 data[i++] = CVM_CAST64(netstats->tx_errors); 804 /*sum of oct->droq[oq_no]->stats->rx_dropped + 805 *oct->droq[oq_no]->stats->dropped_nodispatch + 806 *oct->droq[oq_no]->stats->dropped_toomany + 807 *oct->droq[oq_no]->stats->dropped_nomem 808 */ 809 data[i++] = CVM_CAST64(netstats->rx_dropped); 810 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 811 data[i++] = CVM_CAST64(netstats->tx_dropped); 812 813 /* firmware tx stats */ 814 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 815 *fromhost.fw_total_sent 816 */ 817 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); 818 /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ 819 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); 820 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ 821 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); 822 /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ 823 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); 824 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 825 *fw_err_drop 826 */ 827 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); 828 829 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ 830 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); 831 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 832 *fw_tso_fwd 833 */ 834 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); 835 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 836 *fw_err_tso 837 */ 838 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); 839 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 840 *fw_tx_vxlan 841 */ 842 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); 843 844 /* mac tx statistics */ 845 /*CVMX_BGXX_CMRX_TX_STAT5 */ 846 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); 847 /*CVMX_BGXX_CMRX_TX_STAT4 */ 848 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); 849 /*CVMX_BGXX_CMRX_TX_STAT15 */ 850 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); 851 /*CVMX_BGXX_CMRX_TX_STAT14 */ 852 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); 853 /*CVMX_BGXX_CMRX_TX_STAT17 */ 854 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); 855 /*CVMX_BGXX_CMRX_TX_STAT0 */ 856 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); 857 /*CVMX_BGXX_CMRX_TX_STAT3 */ 858 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); 859 /*CVMX_BGXX_CMRX_TX_STAT2 */ 860 data[i++] = 861 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); 862 /*CVMX_BGXX_CMRX_TX_STAT0 */ 863 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); 864 /*CVMX_BGXX_CMRX_TX_STAT1 */ 865 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); 866 /*CVMX_BGXX_CMRX_TX_STAT16 */ 867 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); 868 /*CVMX_BGXX_CMRX_TX_STAT6 */ 869 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); 870 871 /* RX firmware stats */ 872 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 873 *fw_total_rcvd 874 */ 875 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); 876 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 877 *fw_total_fwd 878 */ 879 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); 880 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ 881 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); 882 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ 883 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); 884 /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ 885 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); 886 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 887 *fw_err_pko 888 */ 889 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); 890 /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ 891 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); 892 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 893 *fromwire.fw_err_drop 894 */ 895 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); 896 897 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 898 *fromwire.fw_rx_vxlan 899 */ 900 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan); 901 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 902 *fromwire.fw_rx_vxlan_err 903 */ 904 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err); 905 906 /* LRO */ 907 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 908 *fw_lro_pkts 909 */ 910 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); 911 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 912 *fw_lro_octs 913 */ 914 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); 915 /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ 916 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); 917 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 918 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); 919 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 920 *fw_lro_aborts_port 921 */ 922 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port); 923 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 924 *fw_lro_aborts_seq 925 */ 926 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq); 927 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 928 *fw_lro_aborts_tsval 929 */ 930 data[i++] = 931 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval); 932 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 933 *fw_lro_aborts_timer 934 */ 935 /* intrmod: packet forward rate */ 936 data[i++] = 937 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer); 938 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 939 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate); 940 941 /* mac: link-level stats */ 942 /*CVMX_BGXX_CMRX_RX_STAT0 */ 943 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd); 944 /*CVMX_BGXX_CMRX_RX_STAT1 */ 945 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd); 946 /*CVMX_PKI_STATX_STAT5 */ 947 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst); 948 /*CVMX_PKI_STATX_STAT5 */ 949 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst); 950 /*wqe->word2.err_code or wqe->word2.err_level */ 951 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts); 952 /*CVMX_BGXX_CMRX_RX_STAT2 */ 953 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd); 954 /*CVMX_BGXX_CMRX_RX_STAT6 */ 955 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err); 956 /*CVMX_BGXX_CMRX_RX_STAT4 */ 957 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop); 958 /*wqe->word2.err_code or wqe->word2.err_level */ 959 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err); 960 /*lio->link_changes*/ 961 data[i++] = CVM_CAST64(lio->link_changes); 962 963 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { 964 if (!(oct_dev->io_qmask.iq & BIT_ULL(j))) 965 continue; 966 /*packets to network port*/ 967 /*# of packets tx to network */ 968 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 969 /*# of bytes tx to network */ 970 data[i++] = 971 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); 972 /*# of packets dropped */ 973 data[i++] = 974 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); 975 /*# of tx fails due to queue full */ 976 data[i++] = 977 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); 978 /*XXX gather entries sent */ 979 data[i++] = 980 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); 981 982 /*instruction to firmware: data and control */ 983 /*# of instructions to the queue */ 984 data[i++] = 985 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); 986 /*# of instructions processed */ 987 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]-> 988 stats.instr_processed); 989 /*# of instructions could not be processed */ 990 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]-> 991 stats.instr_dropped); 992 /*bytes sent through the queue */ 993 data[i++] = 994 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); 995 996 /*tso request*/ 997 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 998 /*vxlan request*/ 999 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 1000 /*txq restart*/ 1001 data[i++] = 1002 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); 1003 } 1004 1005 /* RX */ 1006 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) { 1007 if (!(oct_dev->io_qmask.oq & BIT_ULL(j))) 1008 continue; 1009 1010 /*packets send to TCP/IP network stack */ 1011 /*# of packets to network stack */ 1012 data[i++] = 1013 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); 1014 /*# of bytes to network stack */ 1015 data[i++] = 1016 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); 1017 /*# of packets dropped */ 1018 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + 1019 oct_dev->droq[j]->stats.dropped_toomany + 1020 oct_dev->droq[j]->stats.rx_dropped); 1021 data[i++] = 1022 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); 1023 data[i++] = 1024 CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); 1025 data[i++] = 1026 CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 1027 1028 /*control and data path*/ 1029 data[i++] = 1030 CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 1031 data[i++] = 1032 CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 1033 data[i++] = 1034 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 1035 1036 data[i++] = 1037 CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); 1038 data[i++] = 1039 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 1040 } 1041 } 1042 1043 static void lio_vf_get_ethtool_stats(struct net_device *netdev, 1044 struct ethtool_stats *stats 1045 __attribute__((unused)), 1046 u64 *data) 1047 { 1048 struct net_device_stats *netstats = &netdev->stats; 1049 struct lio *lio = GET_LIO(netdev); 1050 struct octeon_device *oct_dev = lio->oct_dev; 1051 int i = 0, j, vj; 1052 1053 netdev->netdev_ops->ndo_get_stats(netdev); 1054 /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ 1055 data[i++] = CVM_CAST64(netstats->rx_packets); 1056 /* sum of oct->instr_queue[iq_no]->stats.tx_done */ 1057 data[i++] = CVM_CAST64(netstats->tx_packets); 1058 /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1059 data[i++] = CVM_CAST64(netstats->rx_bytes); 1060 /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1061 data[i++] = CVM_CAST64(netstats->tx_bytes); 1062 data[i++] = CVM_CAST64(netstats->rx_errors); 1063 data[i++] = CVM_CAST64(netstats->tx_errors); 1064 /* sum of oct->droq[oq_no]->stats->rx_dropped + 1065 * oct->droq[oq_no]->stats->dropped_nodispatch + 1066 * oct->droq[oq_no]->stats->dropped_toomany + 1067 * oct->droq[oq_no]->stats->dropped_nomem 1068 */ 1069 data[i++] = CVM_CAST64(netstats->rx_dropped); 1070 /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1071 data[i++] = CVM_CAST64(netstats->tx_dropped); 1072 /* lio->link_changes */ 1073 data[i++] = CVM_CAST64(lio->link_changes); 1074 1075 for (vj = 0; vj < lio->linfo.num_txpciq; vj++) { 1076 j = lio->linfo.txpciq[vj].s.q_no; 1077 1078 /* packets to network port */ 1079 /* # of packets tx to network */ 1080 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 1081 /* # of bytes tx to network */ 1082 data[i++] = CVM_CAST64( 1083 oct_dev->instr_queue[j]->stats.tx_tot_bytes); 1084 /* # of packets dropped */ 1085 data[i++] = CVM_CAST64( 1086 oct_dev->instr_queue[j]->stats.tx_dropped); 1087 /* # of tx fails due to queue full */ 1088 data[i++] = CVM_CAST64( 1089 oct_dev->instr_queue[j]->stats.tx_iq_busy); 1090 /* XXX gather entries sent */ 1091 data[i++] = CVM_CAST64( 1092 oct_dev->instr_queue[j]->stats.sgentry_sent); 1093 1094 /* instruction to firmware: data and control */ 1095 /* # of instructions to the queue */ 1096 data[i++] = CVM_CAST64( 1097 oct_dev->instr_queue[j]->stats.instr_posted); 1098 /* # of instructions processed */ 1099 data[i++] = 1100 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed); 1101 /* # of instructions could not be processed */ 1102 data[i++] = 1103 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped); 1104 /* bytes sent through the queue */ 1105 data[i++] = CVM_CAST64( 1106 oct_dev->instr_queue[j]->stats.bytes_sent); 1107 /* tso request */ 1108 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 1109 /* vxlan request */ 1110 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 1111 /* txq restart */ 1112 data[i++] = CVM_CAST64( 1113 oct_dev->instr_queue[j]->stats.tx_restart); 1114 } 1115 1116 /* RX */ 1117 for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) { 1118 j = lio->linfo.rxpciq[vj].s.q_no; 1119 1120 /* packets send to TCP/IP network stack */ 1121 /* # of packets to network stack */ 1122 data[i++] = CVM_CAST64( 1123 oct_dev->droq[j]->stats.rx_pkts_received); 1124 /* # of bytes to network stack */ 1125 data[i++] = CVM_CAST64( 1126 oct_dev->droq[j]->stats.rx_bytes_received); 1127 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + 1128 oct_dev->droq[j]->stats.dropped_toomany + 1129 oct_dev->droq[j]->stats.rx_dropped); 1130 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); 1131 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); 1132 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); 1133 1134 /* control and data path */ 1135 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); 1136 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 1137 data[i++] = 1138 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 1139 1140 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); 1141 data[i++] = 1142 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 1143 } 1144 } 1145 1146 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data) 1147 { 1148 struct octeon_device *oct_dev = lio->oct_dev; 1149 int i; 1150 1151 switch (oct_dev->chip_id) { 1152 case OCTEON_CN23XX_PF_VID: 1153 case OCTEON_CN23XX_VF_VID: 1154 for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) { 1155 sprintf(data, "%s", oct_priv_flags_strings[i]); 1156 data += ETH_GSTRING_LEN; 1157 } 1158 break; 1159 case OCTEON_CN68XX: 1160 case OCTEON_CN66XX: 1161 break; 1162 default: 1163 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 1164 break; 1165 } 1166 } 1167 1168 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 1169 { 1170 struct lio *lio = GET_LIO(netdev); 1171 struct octeon_device *oct_dev = lio->oct_dev; 1172 int num_iq_stats, num_oq_stats, i, j; 1173 int num_stats; 1174 1175 switch (stringset) { 1176 case ETH_SS_STATS: 1177 num_stats = ARRAY_SIZE(oct_stats_strings); 1178 for (j = 0; j < num_stats; j++) { 1179 sprintf(data, "%s", oct_stats_strings[j]); 1180 data += ETH_GSTRING_LEN; 1181 } 1182 1183 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 1184 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { 1185 if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) 1186 continue; 1187 for (j = 0; j < num_iq_stats; j++) { 1188 sprintf(data, "tx-%d-%s", i, 1189 oct_iq_stats_strings[j]); 1190 data += ETH_GSTRING_LEN; 1191 } 1192 } 1193 1194 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 1195 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { 1196 if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) 1197 continue; 1198 for (j = 0; j < num_oq_stats; j++) { 1199 sprintf(data, "rx-%d-%s", i, 1200 oct_droq_stats_strings[j]); 1201 data += ETH_GSTRING_LEN; 1202 } 1203 } 1204 break; 1205 1206 case ETH_SS_PRIV_FLAGS: 1207 lio_get_priv_flags_strings(lio, data); 1208 break; 1209 default: 1210 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); 1211 break; 1212 } 1213 } 1214 1215 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset, 1216 u8 *data) 1217 { 1218 int num_iq_stats, num_oq_stats, i, j; 1219 struct lio *lio = GET_LIO(netdev); 1220 struct octeon_device *oct_dev = lio->oct_dev; 1221 int num_stats; 1222 1223 switch (stringset) { 1224 case ETH_SS_STATS: 1225 num_stats = ARRAY_SIZE(oct_vf_stats_strings); 1226 for (j = 0; j < num_stats; j++) { 1227 sprintf(data, "%s", oct_vf_stats_strings[j]); 1228 data += ETH_GSTRING_LEN; 1229 } 1230 1231 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); 1232 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { 1233 if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) 1234 continue; 1235 for (j = 0; j < num_iq_stats; j++) { 1236 sprintf(data, "tx-%d-%s", i, 1237 oct_iq_stats_strings[j]); 1238 data += ETH_GSTRING_LEN; 1239 } 1240 } 1241 1242 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); 1243 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { 1244 if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) 1245 continue; 1246 for (j = 0; j < num_oq_stats; j++) { 1247 sprintf(data, "rx-%d-%s", i, 1248 oct_droq_stats_strings[j]); 1249 data += ETH_GSTRING_LEN; 1250 } 1251 } 1252 break; 1253 1254 case ETH_SS_PRIV_FLAGS: 1255 lio_get_priv_flags_strings(lio, data); 1256 break; 1257 default: 1258 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); 1259 break; 1260 } 1261 } 1262 1263 static int lio_get_priv_flags_ss_count(struct lio *lio) 1264 { 1265 struct octeon_device *oct_dev = lio->oct_dev; 1266 1267 switch (oct_dev->chip_id) { 1268 case OCTEON_CN23XX_PF_VID: 1269 case OCTEON_CN23XX_VF_VID: 1270 return ARRAY_SIZE(oct_priv_flags_strings); 1271 case OCTEON_CN68XX: 1272 case OCTEON_CN66XX: 1273 return -EOPNOTSUPP; 1274 default: 1275 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 1276 return -EOPNOTSUPP; 1277 } 1278 } 1279 1280 static int lio_get_sset_count(struct net_device *netdev, int sset) 1281 { 1282 struct lio *lio = GET_LIO(netdev); 1283 struct octeon_device *oct_dev = lio->oct_dev; 1284 1285 switch (sset) { 1286 case ETH_SS_STATS: 1287 return (ARRAY_SIZE(oct_stats_strings) + 1288 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + 1289 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); 1290 case ETH_SS_PRIV_FLAGS: 1291 return lio_get_priv_flags_ss_count(lio); 1292 default: 1293 return -EOPNOTSUPP; 1294 } 1295 } 1296 1297 static int lio_vf_get_sset_count(struct net_device *netdev, int sset) 1298 { 1299 struct lio *lio = GET_LIO(netdev); 1300 struct octeon_device *oct_dev = lio->oct_dev; 1301 1302 switch (sset) { 1303 case ETH_SS_STATS: 1304 return (ARRAY_SIZE(oct_vf_stats_strings) + 1305 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + 1306 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); 1307 case ETH_SS_PRIV_FLAGS: 1308 return lio_get_priv_flags_ss_count(lio); 1309 default: 1310 return -EOPNOTSUPP; 1311 } 1312 } 1313 1314 /* Callback function for intrmod */ 1315 static void octnet_intrmod_callback(struct octeon_device *oct_dev, 1316 u32 status, 1317 void *ptr) 1318 { 1319 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; 1320 struct oct_intrmod_context *ctx; 1321 1322 ctx = (struct oct_intrmod_context *)sc->ctxptr; 1323 1324 ctx->status = status; 1325 1326 oct_dev = lio_get_device(ctx->octeon_id); 1327 1328 WRITE_ONCE(ctx->cond, 1); 1329 1330 /* This barrier is required to be sure that the response has been 1331 * written fully before waking up the handler 1332 */ 1333 wmb(); 1334 1335 wake_up_interruptible(&ctx->wc); 1336 } 1337 1338 /* get interrupt moderation parameters */ 1339 static int octnet_get_intrmod_cfg(struct lio *lio, 1340 struct oct_intrmod_cfg *intr_cfg) 1341 { 1342 struct octeon_soft_command *sc; 1343 struct oct_intrmod_context *ctx; 1344 struct oct_intrmod_resp *resp; 1345 int retval; 1346 struct octeon_device *oct_dev = lio->oct_dev; 1347 1348 /* Alloc soft command */ 1349 sc = (struct octeon_soft_command *) 1350 octeon_alloc_soft_command(oct_dev, 1351 0, 1352 sizeof(struct oct_intrmod_resp), 1353 sizeof(struct oct_intrmod_context)); 1354 1355 if (!sc) 1356 return -ENOMEM; 1357 1358 resp = (struct oct_intrmod_resp *)sc->virtrptr; 1359 memset(resp, 0, sizeof(struct oct_intrmod_resp)); 1360 1361 ctx = (struct oct_intrmod_context *)sc->ctxptr; 1362 memset(ctx, 0, sizeof(struct oct_intrmod_context)); 1363 WRITE_ONCE(ctx->cond, 0); 1364 ctx->octeon_id = lio_get_device_id(oct_dev); 1365 init_waitqueue_head(&ctx->wc); 1366 1367 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1368 1369 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, 1370 OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); 1371 1372 sc->callback = octnet_intrmod_callback; 1373 sc->callback_arg = sc; 1374 sc->wait_time = 1000; 1375 1376 retval = octeon_send_soft_command(oct_dev, sc); 1377 if (retval == IQ_SEND_FAILED) { 1378 octeon_free_soft_command(oct_dev, sc); 1379 return -EINVAL; 1380 } 1381 1382 /* Sleep on a wait queue till the cond flag indicates that the 1383 * response arrived or timed-out. 1384 */ 1385 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 1386 dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n"); 1387 goto intrmod_info_wait_intr; 1388 } 1389 1390 retval = ctx->status || resp->status; 1391 if (retval) { 1392 dev_err(&oct_dev->pci_dev->dev, 1393 "Get interrupt moderation parameters failed\n"); 1394 goto intrmod_info_wait_fail; 1395 } 1396 1397 octeon_swap_8B_data((u64 *)&resp->intrmod, 1398 (sizeof(struct oct_intrmod_cfg)) / 8); 1399 memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg)); 1400 octeon_free_soft_command(oct_dev, sc); 1401 1402 return 0; 1403 1404 intrmod_info_wait_fail: 1405 1406 octeon_free_soft_command(oct_dev, sc); 1407 1408 intrmod_info_wait_intr: 1409 1410 return -ENODEV; 1411 } 1412 1413 /* Configure interrupt moderation parameters */ 1414 static int octnet_set_intrmod_cfg(struct lio *lio, 1415 struct oct_intrmod_cfg *intr_cfg) 1416 { 1417 struct octeon_soft_command *sc; 1418 struct oct_intrmod_context *ctx; 1419 struct oct_intrmod_cfg *cfg; 1420 int retval; 1421 struct octeon_device *oct_dev = lio->oct_dev; 1422 1423 /* Alloc soft command */ 1424 sc = (struct octeon_soft_command *) 1425 octeon_alloc_soft_command(oct_dev, 1426 sizeof(struct oct_intrmod_cfg), 1427 0, 1428 sizeof(struct oct_intrmod_context)); 1429 1430 if (!sc) 1431 return -ENOMEM; 1432 1433 ctx = (struct oct_intrmod_context *)sc->ctxptr; 1434 1435 WRITE_ONCE(ctx->cond, 0); 1436 ctx->octeon_id = lio_get_device_id(oct_dev); 1437 init_waitqueue_head(&ctx->wc); 1438 1439 cfg = (struct oct_intrmod_cfg *)sc->virtdptr; 1440 1441 memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); 1442 octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8); 1443 1444 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1445 1446 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, 1447 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); 1448 1449 sc->callback = octnet_intrmod_callback; 1450 sc->callback_arg = sc; 1451 sc->wait_time = 1000; 1452 1453 retval = octeon_send_soft_command(oct_dev, sc); 1454 if (retval == IQ_SEND_FAILED) { 1455 octeon_free_soft_command(oct_dev, sc); 1456 return -EINVAL; 1457 } 1458 1459 /* Sleep on a wait queue till the cond flag indicates that the 1460 * response arrived or timed-out. 1461 */ 1462 if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) { 1463 retval = ctx->status; 1464 if (retval) 1465 dev_err(&oct_dev->pci_dev->dev, 1466 "intrmod config failed. Status: %llx\n", 1467 CVM_CAST64(retval)); 1468 else 1469 dev_info(&oct_dev->pci_dev->dev, 1470 "Rx-Adaptive Interrupt moderation %s\n", 1471 (intr_cfg->rx_enable) ? 1472 "enabled" : "disabled"); 1473 1474 octeon_free_soft_command(oct_dev, sc); 1475 1476 return ((retval) ? -ENODEV : 0); 1477 } 1478 1479 dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n"); 1480 1481 return -EINTR; 1482 } 1483 1484 static void 1485 octnet_nic_stats_callback(struct octeon_device *oct_dev, 1486 u32 status, void *ptr) 1487 { 1488 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; 1489 struct oct_nic_stats_resp *resp = 1490 (struct oct_nic_stats_resp *)sc->virtrptr; 1491 struct oct_nic_stats_ctrl *ctrl = 1492 (struct oct_nic_stats_ctrl *)sc->ctxptr; 1493 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; 1494 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; 1495 1496 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; 1497 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost; 1498 1499 if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) { 1500 octeon_swap_8B_data((u64 *)&resp->stats, 1501 (sizeof(struct oct_link_stats)) >> 3); 1502 1503 /* RX link-level stats */ 1504 rstats->total_rcvd = rsp_rstats->total_rcvd; 1505 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; 1506 rstats->total_bcst = rsp_rstats->total_bcst; 1507 rstats->total_mcst = rsp_rstats->total_mcst; 1508 rstats->runts = rsp_rstats->runts; 1509 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; 1510 /* Accounts for over/under-run of buffers */ 1511 rstats->fifo_err = rsp_rstats->fifo_err; 1512 rstats->dmac_drop = rsp_rstats->dmac_drop; 1513 rstats->fcs_err = rsp_rstats->fcs_err; 1514 rstats->jabber_err = rsp_rstats->jabber_err; 1515 rstats->l2_err = rsp_rstats->l2_err; 1516 rstats->frame_err = rsp_rstats->frame_err; 1517 1518 /* RX firmware stats */ 1519 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; 1520 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; 1521 rstats->fw_err_pko = rsp_rstats->fw_err_pko; 1522 rstats->fw_err_link = rsp_rstats->fw_err_link; 1523 rstats->fw_err_drop = rsp_rstats->fw_err_drop; 1524 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; 1525 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; 1526 1527 /* Number of packets that are LROed */ 1528 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; 1529 /* Number of octets that are LROed */ 1530 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; 1531 /* Number of LRO packets formed */ 1532 rstats->fw_total_lro = rsp_rstats->fw_total_lro; 1533 /* Number of times lRO of packet aborted */ 1534 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; 1535 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; 1536 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; 1537 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; 1538 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; 1539 /* intrmod: packet forward rate */ 1540 rstats->fwd_rate = rsp_rstats->fwd_rate; 1541 1542 /* TX link-level stats */ 1543 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; 1544 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; 1545 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; 1546 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; 1547 tstats->ctl_sent = rsp_tstats->ctl_sent; 1548 /* Packets sent after one collision*/ 1549 tstats->one_collision_sent = rsp_tstats->one_collision_sent; 1550 /* Packets sent after multiple collision*/ 1551 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; 1552 /* Packets not sent due to max collisions */ 1553 tstats->max_collision_fail = rsp_tstats->max_collision_fail; 1554 /* Packets not sent due to max deferrals */ 1555 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; 1556 /* Accounts for over/under-run of buffers */ 1557 tstats->fifo_err = rsp_tstats->fifo_err; 1558 tstats->runts = rsp_tstats->runts; 1559 /* Total number of collisions detected */ 1560 tstats->total_collisions = rsp_tstats->total_collisions; 1561 1562 /* firmware stats */ 1563 tstats->fw_total_sent = rsp_tstats->fw_total_sent; 1564 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; 1565 tstats->fw_err_pko = rsp_tstats->fw_err_pko; 1566 tstats->fw_err_link = rsp_tstats->fw_err_link; 1567 tstats->fw_err_drop = rsp_tstats->fw_err_drop; 1568 tstats->fw_tso = rsp_tstats->fw_tso; 1569 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; 1570 tstats->fw_err_tso = rsp_tstats->fw_err_tso; 1571 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; 1572 1573 resp->status = 1; 1574 } else { 1575 resp->status = -1; 1576 } 1577 complete(&ctrl->complete); 1578 } 1579 1580 /* Configure interrupt moderation parameters */ 1581 static int octnet_get_link_stats(struct net_device *netdev) 1582 { 1583 struct lio *lio = GET_LIO(netdev); 1584 struct octeon_device *oct_dev = lio->oct_dev; 1585 1586 struct octeon_soft_command *sc; 1587 struct oct_nic_stats_ctrl *ctrl; 1588 struct oct_nic_stats_resp *resp; 1589 1590 int retval; 1591 1592 /* Alloc soft command */ 1593 sc = (struct octeon_soft_command *) 1594 octeon_alloc_soft_command(oct_dev, 1595 0, 1596 sizeof(struct oct_nic_stats_resp), 1597 sizeof(struct octnic_ctrl_pkt)); 1598 1599 if (!sc) 1600 return -ENOMEM; 1601 1602 resp = (struct oct_nic_stats_resp *)sc->virtrptr; 1603 memset(resp, 0, sizeof(struct oct_nic_stats_resp)); 1604 1605 ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; 1606 memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); 1607 ctrl->netdev = netdev; 1608 init_completion(&ctrl->complete); 1609 1610 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1611 1612 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, 1613 OPCODE_NIC_PORT_STATS, 0, 0, 0); 1614 1615 sc->callback = octnet_nic_stats_callback; 1616 sc->callback_arg = sc; 1617 sc->wait_time = 500; /*in milli seconds*/ 1618 1619 retval = octeon_send_soft_command(oct_dev, sc); 1620 if (retval == IQ_SEND_FAILED) { 1621 octeon_free_soft_command(oct_dev, sc); 1622 return -EINVAL; 1623 } 1624 1625 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); 1626 1627 if (resp->status != 1) { 1628 octeon_free_soft_command(oct_dev, sc); 1629 1630 return -EINVAL; 1631 } 1632 1633 octeon_free_soft_command(oct_dev, sc); 1634 1635 return 0; 1636 } 1637 1638 static int lio_get_intr_coalesce(struct net_device *netdev, 1639 struct ethtool_coalesce *intr_coal) 1640 { 1641 struct lio *lio = GET_LIO(netdev); 1642 struct octeon_device *oct = lio->oct_dev; 1643 struct octeon_instr_queue *iq; 1644 struct oct_intrmod_cfg intrmod_cfg; 1645 1646 if (octnet_get_intrmod_cfg(lio, &intrmod_cfg)) 1647 return -ENODEV; 1648 1649 switch (oct->chip_id) { 1650 case OCTEON_CN23XX_PF_VID: 1651 case OCTEON_CN23XX_VF_VID: { 1652 if (!intrmod_cfg.rx_enable) { 1653 intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs; 1654 intr_coal->rx_max_coalesced_frames = 1655 oct->rx_max_coalesced_frames; 1656 } 1657 if (!intrmod_cfg.tx_enable) 1658 intr_coal->tx_max_coalesced_frames = 1659 oct->tx_max_coalesced_frames; 1660 break; 1661 } 1662 case OCTEON_CN68XX: 1663 case OCTEON_CN66XX: { 1664 struct octeon_cn6xxx *cn6xxx = 1665 (struct octeon_cn6xxx *)oct->chip; 1666 1667 if (!intrmod_cfg.rx_enable) { 1668 intr_coal->rx_coalesce_usecs = 1669 CFG_GET_OQ_INTR_TIME(cn6xxx->conf); 1670 intr_coal->rx_max_coalesced_frames = 1671 CFG_GET_OQ_INTR_PKT(cn6xxx->conf); 1672 } 1673 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; 1674 intr_coal->tx_max_coalesced_frames = iq->fill_threshold; 1675 break; 1676 } 1677 default: 1678 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); 1679 return -EINVAL; 1680 } 1681 if (intrmod_cfg.rx_enable) { 1682 intr_coal->use_adaptive_rx_coalesce = 1683 intrmod_cfg.rx_enable; 1684 intr_coal->rate_sample_interval = 1685 intrmod_cfg.check_intrvl; 1686 intr_coal->pkt_rate_high = 1687 intrmod_cfg.maxpkt_ratethr; 1688 intr_coal->pkt_rate_low = 1689 intrmod_cfg.minpkt_ratethr; 1690 intr_coal->rx_max_coalesced_frames_high = 1691 intrmod_cfg.rx_maxcnt_trigger; 1692 intr_coal->rx_coalesce_usecs_high = 1693 intrmod_cfg.rx_maxtmr_trigger; 1694 intr_coal->rx_coalesce_usecs_low = 1695 intrmod_cfg.rx_mintmr_trigger; 1696 intr_coal->rx_max_coalesced_frames_low = 1697 intrmod_cfg.rx_mincnt_trigger; 1698 } 1699 if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && 1700 (intrmod_cfg.tx_enable)) { 1701 intr_coal->use_adaptive_tx_coalesce = 1702 intrmod_cfg.tx_enable; 1703 intr_coal->tx_max_coalesced_frames_high = 1704 intrmod_cfg.tx_maxcnt_trigger; 1705 intr_coal->tx_max_coalesced_frames_low = 1706 intrmod_cfg.tx_mincnt_trigger; 1707 } 1708 return 0; 1709 } 1710 1711 /* Enable/Disable auto interrupt Moderation */ 1712 static int oct_cfg_adaptive_intr(struct lio *lio, 1713 struct oct_intrmod_cfg *intrmod_cfg, 1714 struct ethtool_coalesce *intr_coal) 1715 { 1716 int ret = 0; 1717 1718 if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) { 1719 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; 1720 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; 1721 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; 1722 } 1723 if (intrmod_cfg->rx_enable) { 1724 intrmod_cfg->rx_maxcnt_trigger = 1725 intr_coal->rx_max_coalesced_frames_high; 1726 intrmod_cfg->rx_maxtmr_trigger = 1727 intr_coal->rx_coalesce_usecs_high; 1728 intrmod_cfg->rx_mintmr_trigger = 1729 intr_coal->rx_coalesce_usecs_low; 1730 intrmod_cfg->rx_mincnt_trigger = 1731 intr_coal->rx_max_coalesced_frames_low; 1732 } 1733 if (intrmod_cfg->tx_enable) { 1734 intrmod_cfg->tx_maxcnt_trigger = 1735 intr_coal->tx_max_coalesced_frames_high; 1736 intrmod_cfg->tx_mincnt_trigger = 1737 intr_coal->tx_max_coalesced_frames_low; 1738 } 1739 1740 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); 1741 1742 return ret; 1743 } 1744 1745 static int 1746 oct_cfg_rx_intrcnt(struct lio *lio, 1747 struct oct_intrmod_cfg *intrmod, 1748 struct ethtool_coalesce *intr_coal) 1749 { 1750 struct octeon_device *oct = lio->oct_dev; 1751 u32 rx_max_coalesced_frames; 1752 1753 /* Config Cnt based interrupt values */ 1754 switch (oct->chip_id) { 1755 case OCTEON_CN68XX: 1756 case OCTEON_CN66XX: { 1757 struct octeon_cn6xxx *cn6xxx = 1758 (struct octeon_cn6xxx *)oct->chip; 1759 1760 if (!intr_coal->rx_max_coalesced_frames) 1761 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; 1762 else 1763 rx_max_coalesced_frames = 1764 intr_coal->rx_max_coalesced_frames; 1765 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, 1766 rx_max_coalesced_frames); 1767 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); 1768 break; 1769 } 1770 case OCTEON_CN23XX_PF_VID: { 1771 int q_no; 1772 1773 if (!intr_coal->rx_max_coalesced_frames) 1774 rx_max_coalesced_frames = intrmod->rx_frames; 1775 else 1776 rx_max_coalesced_frames = 1777 intr_coal->rx_max_coalesced_frames; 1778 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 1779 q_no += oct->sriov_info.pf_srn; 1780 octeon_write_csr64( 1781 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 1782 (octeon_read_csr64( 1783 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & 1784 (0x3fffff00000000UL)) | 1785 (rx_max_coalesced_frames - 1)); 1786 /*consider setting resend bit*/ 1787 } 1788 intrmod->rx_frames = rx_max_coalesced_frames; 1789 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 1790 break; 1791 } 1792 case OCTEON_CN23XX_VF_VID: { 1793 int q_no; 1794 1795 if (!intr_coal->rx_max_coalesced_frames) 1796 rx_max_coalesced_frames = intrmod->rx_frames; 1797 else 1798 rx_max_coalesced_frames = 1799 intr_coal->rx_max_coalesced_frames; 1800 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 1801 octeon_write_csr64( 1802 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 1803 (octeon_read_csr64( 1804 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & 1805 (0x3fffff00000000UL)) | 1806 rx_max_coalesced_frames); 1807 /*consider writing to resend bit here*/ 1808 } 1809 intrmod->rx_frames = rx_max_coalesced_frames; 1810 oct->rx_max_coalesced_frames = rx_max_coalesced_frames; 1811 break; 1812 } 1813 default: 1814 return -EINVAL; 1815 } 1816 return 0; 1817 } 1818 1819 static int oct_cfg_rx_intrtime(struct lio *lio, 1820 struct oct_intrmod_cfg *intrmod, 1821 struct ethtool_coalesce *intr_coal) 1822 { 1823 struct octeon_device *oct = lio->oct_dev; 1824 u32 time_threshold, rx_coalesce_usecs; 1825 1826 /* Config Time based interrupt values */ 1827 switch (oct->chip_id) { 1828 case OCTEON_CN68XX: 1829 case OCTEON_CN66XX: { 1830 struct octeon_cn6xxx *cn6xxx = 1831 (struct octeon_cn6xxx *)oct->chip; 1832 if (!intr_coal->rx_coalesce_usecs) 1833 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; 1834 else 1835 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 1836 1837 time_threshold = lio_cn6xxx_get_oq_ticks(oct, 1838 rx_coalesce_usecs); 1839 octeon_write_csr(oct, 1840 CN6XXX_SLI_OQ_INT_LEVEL_TIME, 1841 time_threshold); 1842 1843 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); 1844 break; 1845 } 1846 case OCTEON_CN23XX_PF_VID: { 1847 u64 time_threshold; 1848 int q_no; 1849 1850 if (!intr_coal->rx_coalesce_usecs) 1851 rx_coalesce_usecs = intrmod->rx_usecs; 1852 else 1853 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 1854 time_threshold = 1855 cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 1856 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 1857 q_no += oct->sriov_info.pf_srn; 1858 octeon_write_csr64(oct, 1859 CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 1860 (intrmod->rx_frames | 1861 ((u64)time_threshold << 32))); 1862 /*consider writing to resend bit here*/ 1863 } 1864 intrmod->rx_usecs = rx_coalesce_usecs; 1865 oct->rx_coalesce_usecs = rx_coalesce_usecs; 1866 break; 1867 } 1868 case OCTEON_CN23XX_VF_VID: { 1869 u64 time_threshold; 1870 int q_no; 1871 1872 if (!intr_coal->rx_coalesce_usecs) 1873 rx_coalesce_usecs = intrmod->rx_usecs; 1874 else 1875 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 1876 1877 time_threshold = 1878 cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 1879 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 1880 octeon_write_csr64( 1881 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 1882 (intrmod->rx_frames | 1883 ((u64)time_threshold << 32))); 1884 /*consider setting resend bit*/ 1885 } 1886 intrmod->rx_usecs = rx_coalesce_usecs; 1887 oct->rx_coalesce_usecs = rx_coalesce_usecs; 1888 break; 1889 } 1890 default: 1891 return -EINVAL; 1892 } 1893 1894 return 0; 1895 } 1896 1897 static int 1898 oct_cfg_tx_intrcnt(struct lio *lio, 1899 struct oct_intrmod_cfg *intrmod, 1900 struct ethtool_coalesce *intr_coal) 1901 { 1902 struct octeon_device *oct = lio->oct_dev; 1903 u32 iq_intr_pkt; 1904 void __iomem *inst_cnt_reg; 1905 u64 val; 1906 1907 /* Config Cnt based interrupt values */ 1908 switch (oct->chip_id) { 1909 case OCTEON_CN68XX: 1910 case OCTEON_CN66XX: 1911 break; 1912 case OCTEON_CN23XX_VF_VID: 1913 case OCTEON_CN23XX_PF_VID: { 1914 int q_no; 1915 1916 if (!intr_coal->tx_max_coalesced_frames) 1917 iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD & 1918 CN23XX_PKT_IN_DONE_WMARK_MASK; 1919 else 1920 iq_intr_pkt = intr_coal->tx_max_coalesced_frames & 1921 CN23XX_PKT_IN_DONE_WMARK_MASK; 1922 for (q_no = 0; q_no < oct->num_iqs; q_no++) { 1923 inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg; 1924 val = readq(inst_cnt_reg); 1925 /*clear wmark and count.dont want to write count back*/ 1926 val = (val & 0xFFFF000000000000ULL) | 1927 ((u64)(iq_intr_pkt - 1) 1928 << CN23XX_PKT_IN_DONE_WMARK_BIT_POS); 1929 writeq(val, inst_cnt_reg); 1930 /*consider setting resend bit*/ 1931 } 1932 intrmod->tx_frames = iq_intr_pkt; 1933 oct->tx_max_coalesced_frames = iq_intr_pkt; 1934 break; 1935 } 1936 default: 1937 return -EINVAL; 1938 } 1939 return 0; 1940 } 1941 1942 static int lio_set_intr_coalesce(struct net_device *netdev, 1943 struct ethtool_coalesce *intr_coal) 1944 { 1945 struct lio *lio = GET_LIO(netdev); 1946 int ret; 1947 struct octeon_device *oct = lio->oct_dev; 1948 struct oct_intrmod_cfg intrmod = {0}; 1949 u32 j, q_no; 1950 int db_max, db_min; 1951 1952 switch (oct->chip_id) { 1953 case OCTEON_CN68XX: 1954 case OCTEON_CN66XX: 1955 db_min = CN6XXX_DB_MIN; 1956 db_max = CN6XXX_DB_MAX; 1957 if ((intr_coal->tx_max_coalesced_frames >= db_min) && 1958 (intr_coal->tx_max_coalesced_frames <= db_max)) { 1959 for (j = 0; j < lio->linfo.num_txpciq; j++) { 1960 q_no = lio->linfo.txpciq[j].s.q_no; 1961 oct->instr_queue[q_no]->fill_threshold = 1962 intr_coal->tx_max_coalesced_frames; 1963 } 1964 } else { 1965 dev_err(&oct->pci_dev->dev, 1966 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", 1967 intr_coal->tx_max_coalesced_frames, 1968 db_min, db_max); 1969 return -EINVAL; 1970 } 1971 break; 1972 case OCTEON_CN23XX_PF_VID: 1973 case OCTEON_CN23XX_VF_VID: 1974 break; 1975 default: 1976 return -EINVAL; 1977 } 1978 1979 intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; 1980 intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0; 1981 intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 1982 intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 1983 intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 1984 1985 ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal); 1986 1987 if (!intr_coal->use_adaptive_rx_coalesce) { 1988 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal); 1989 if (ret) 1990 goto ret_intrmod; 1991 1992 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal); 1993 if (ret) 1994 goto ret_intrmod; 1995 } else { 1996 oct->rx_coalesce_usecs = 1997 CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 1998 oct->rx_max_coalesced_frames = 1999 CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 2000 } 2001 2002 if (!intr_coal->use_adaptive_tx_coalesce) { 2003 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal); 2004 if (ret) 2005 goto ret_intrmod; 2006 } else { 2007 oct->tx_max_coalesced_frames = 2008 CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 2009 } 2010 2011 return 0; 2012 ret_intrmod: 2013 return ret; 2014 } 2015 2016 static int lio_get_ts_info(struct net_device *netdev, 2017 struct ethtool_ts_info *info) 2018 { 2019 struct lio *lio = GET_LIO(netdev); 2020 2021 info->so_timestamping = 2022 #ifdef PTP_HARDWARE_TIMESTAMPING 2023 SOF_TIMESTAMPING_TX_HARDWARE | 2024 SOF_TIMESTAMPING_RX_HARDWARE | 2025 SOF_TIMESTAMPING_RAW_HARDWARE | 2026 SOF_TIMESTAMPING_TX_SOFTWARE | 2027 #endif 2028 SOF_TIMESTAMPING_RX_SOFTWARE | 2029 SOF_TIMESTAMPING_SOFTWARE; 2030 2031 if (lio->ptp_clock) 2032 info->phc_index = ptp_clock_index(lio->ptp_clock); 2033 else 2034 info->phc_index = -1; 2035 2036 #ifdef PTP_HARDWARE_TIMESTAMPING 2037 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 2038 2039 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2040 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 2041 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 2042 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 2043 #endif 2044 2045 return 0; 2046 } 2047 2048 /* Return register dump len. */ 2049 static int lio_get_regs_len(struct net_device *dev) 2050 { 2051 struct lio *lio = GET_LIO(dev); 2052 struct octeon_device *oct = lio->oct_dev; 2053 2054 switch (oct->chip_id) { 2055 case OCTEON_CN23XX_PF_VID: 2056 return OCT_ETHTOOL_REGDUMP_LEN_23XX; 2057 case OCTEON_CN23XX_VF_VID: 2058 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF; 2059 default: 2060 return OCT_ETHTOOL_REGDUMP_LEN; 2061 } 2062 } 2063 2064 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct) 2065 { 2066 u32 reg; 2067 u8 pf_num = oct->pf_num; 2068 int len = 0; 2069 int i; 2070 2071 /* PCI Window Registers */ 2072 2073 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2074 2075 /*0x29030 or 0x29040*/ 2076 reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num); 2077 len += sprintf(s + len, 2078 "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n", 2079 reg, oct->pcie_port, oct->pf_num, 2080 (u64)octeon_read_csr64(oct, reg)); 2081 2082 /*0x27080 or 0x27090*/ 2083 reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); 2084 len += 2085 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n", 2086 reg, oct->pcie_port, oct->pf_num, 2087 (u64)octeon_read_csr64(oct, reg)); 2088 2089 /*0x27000 or 0x27010*/ 2090 reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); 2091 len += 2092 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n", 2093 reg, oct->pcie_port, oct->pf_num, 2094 (u64)octeon_read_csr64(oct, reg)); 2095 2096 /*0x29120*/ 2097 reg = 0x29120; 2098 len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg, 2099 (u64)octeon_read_csr64(oct, reg)); 2100 2101 /*0x27300*/ 2102 reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2103 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2104 len += sprintf( 2105 s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg, 2106 oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg)); 2107 2108 /*0x27200*/ 2109 reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + 2110 (oct->pf_num) * CN23XX_PF_INT_OFFSET; 2111 len += sprintf(s + len, 2112 "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n", 2113 reg, oct->pcie_port, oct->pf_num, 2114 (u64)octeon_read_csr64(oct, reg)); 2115 2116 /*29130*/ 2117 reg = CN23XX_SLI_PKT_CNT_INT; 2118 len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg, 2119 (u64)octeon_read_csr64(oct, reg)); 2120 2121 /*0x29140*/ 2122 reg = CN23XX_SLI_PKT_TIME_INT; 2123 len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg, 2124 (u64)octeon_read_csr64(oct, reg)); 2125 2126 /*0x29160*/ 2127 reg = 0x29160; 2128 len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg, 2129 (u64)octeon_read_csr64(oct, reg)); 2130 2131 /*0x29180*/ 2132 reg = CN23XX_SLI_OQ_WMARK; 2133 len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n", 2134 reg, (u64)octeon_read_csr64(oct, reg)); 2135 2136 /*0x291E0*/ 2137 reg = CN23XX_SLI_PKT_IOQ_RING_RST; 2138 len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg, 2139 (u64)octeon_read_csr64(oct, reg)); 2140 2141 /*0x29210*/ 2142 reg = CN23XX_SLI_GBL_CONTROL; 2143 len += sprintf(s + len, 2144 "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg, 2145 (u64)octeon_read_csr64(oct, reg)); 2146 2147 /*0x29220*/ 2148 reg = 0x29220; 2149 len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n", 2150 reg, (u64)octeon_read_csr64(oct, reg)); 2151 2152 /*PF only*/ 2153 if (pf_num == 0) { 2154 /*0x29260*/ 2155 reg = CN23XX_SLI_OUT_BP_EN_W1S; 2156 len += sprintf(s + len, 2157 "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n", 2158 reg, (u64)octeon_read_csr64(oct, reg)); 2159 } else if (pf_num == 1) { 2160 /*0x29270*/ 2161 reg = CN23XX_SLI_OUT_BP_EN2_W1S; 2162 len += sprintf(s + len, 2163 "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n", 2164 reg, (u64)octeon_read_csr64(oct, reg)); 2165 } 2166 2167 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2168 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i); 2169 len += 2170 sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2171 reg, i, (u64)octeon_read_csr64(oct, reg)); 2172 } 2173 2174 /*0x10040*/ 2175 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2176 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2177 len += sprintf(s + len, 2178 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2179 reg, i, (u64)octeon_read_csr64(oct, reg)); 2180 } 2181 2182 /*0x10080*/ 2183 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2184 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i); 2185 len += sprintf(s + len, 2186 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2187 reg, i, (u64)octeon_read_csr64(oct, reg)); 2188 } 2189 2190 /*0x10090*/ 2191 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2192 reg = CN23XX_SLI_OQ_SIZE(i); 2193 len += sprintf( 2194 s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2195 reg, i, (u64)octeon_read_csr64(oct, reg)); 2196 } 2197 2198 /*0x10050*/ 2199 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2200 reg = CN23XX_SLI_OQ_PKT_CONTROL(i); 2201 len += sprintf( 2202 s + len, 2203 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2204 reg, i, (u64)octeon_read_csr64(oct, reg)); 2205 } 2206 2207 /*0x10070*/ 2208 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2209 reg = CN23XX_SLI_OQ_BASE_ADDR64(i); 2210 len += sprintf(s + len, 2211 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2212 reg, i, (u64)octeon_read_csr64(oct, reg)); 2213 } 2214 2215 /*0x100a0*/ 2216 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2217 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i); 2218 len += sprintf(s + len, 2219 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2220 reg, i, (u64)octeon_read_csr64(oct, reg)); 2221 } 2222 2223 /*0x100b0*/ 2224 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2225 reg = CN23XX_SLI_OQ_PKTS_SENT(i); 2226 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2227 reg, i, (u64)octeon_read_csr64(oct, reg)); 2228 } 2229 2230 /*0x100c0*/ 2231 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { 2232 reg = 0x100c0 + i * CN23XX_OQ_OFFSET; 2233 len += sprintf(s + len, 2234 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2235 reg, i, (u64)octeon_read_csr64(oct, reg)); 2236 2237 /*0x10000*/ 2238 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2239 reg = CN23XX_SLI_IQ_PKT_CONTROL64(i); 2240 len += sprintf( 2241 s + len, 2242 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2243 reg, i, (u64)octeon_read_csr64(oct, reg)); 2244 } 2245 2246 /*0x10010*/ 2247 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2248 reg = CN23XX_SLI_IQ_BASE_ADDR64(i); 2249 len += sprintf( 2250 s + len, 2251 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg, 2252 i, (u64)octeon_read_csr64(oct, reg)); 2253 } 2254 2255 /*0x10020*/ 2256 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2257 reg = CN23XX_SLI_IQ_DOORBELL(i); 2258 len += sprintf( 2259 s + len, 2260 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2261 reg, i, (u64)octeon_read_csr64(oct, reg)); 2262 } 2263 2264 /*0x10030*/ 2265 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { 2266 reg = CN23XX_SLI_IQ_SIZE(i); 2267 len += sprintf( 2268 s + len, 2269 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2270 reg, i, (u64)octeon_read_csr64(oct, reg)); 2271 } 2272 2273 /*0x10040*/ 2274 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) 2275 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); 2276 len += sprintf(s + len, 2277 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2278 reg, i, (u64)octeon_read_csr64(oct, reg)); 2279 } 2280 2281 return len; 2282 } 2283 2284 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct) 2285 { 2286 int len = 0; 2287 u32 reg; 2288 int i; 2289 2290 /* PCI Window Registers */ 2291 2292 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2293 2294 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2295 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i); 2296 len += sprintf(s + len, 2297 "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", 2298 reg, i, (u64)octeon_read_csr64(oct, reg)); 2299 } 2300 2301 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2302 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2303 len += sprintf(s + len, 2304 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2305 reg, i, (u64)octeon_read_csr64(oct, reg)); 2306 } 2307 2308 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2309 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i); 2310 len += sprintf(s + len, 2311 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2312 reg, i, (u64)octeon_read_csr64(oct, reg)); 2313 } 2314 2315 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2316 reg = CN23XX_VF_SLI_OQ_SIZE(i); 2317 len += sprintf(s + len, 2318 "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2319 reg, i, (u64)octeon_read_csr64(oct, reg)); 2320 } 2321 2322 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2323 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i); 2324 len += sprintf(s + len, 2325 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2326 reg, i, (u64)octeon_read_csr64(oct, reg)); 2327 } 2328 2329 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2330 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i); 2331 len += sprintf(s + len, 2332 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2333 reg, i, (u64)octeon_read_csr64(oct, reg)); 2334 } 2335 2336 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2337 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i); 2338 len += sprintf(s + len, 2339 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2340 reg, i, (u64)octeon_read_csr64(oct, reg)); 2341 } 2342 2343 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2344 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i); 2345 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2346 reg, i, (u64)octeon_read_csr64(oct, reg)); 2347 } 2348 2349 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2350 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET; 2351 len += sprintf(s + len, 2352 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2353 reg, i, (u64)octeon_read_csr64(oct, reg)); 2354 } 2355 2356 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2357 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET; 2358 len += sprintf(s + len, 2359 "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n", 2360 reg, i, (u64)octeon_read_csr64(oct, reg)); 2361 } 2362 2363 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2364 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i); 2365 len += sprintf(s + len, 2366 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2367 reg, i, (u64)octeon_read_csr64(oct, reg)); 2368 } 2369 2370 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2371 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i); 2372 len += sprintf(s + len, 2373 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", 2374 reg, i, (u64)octeon_read_csr64(oct, reg)); 2375 } 2376 2377 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2378 reg = CN23XX_VF_SLI_IQ_DOORBELL(i); 2379 len += sprintf(s + len, 2380 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2381 reg, i, (u64)octeon_read_csr64(oct, reg)); 2382 } 2383 2384 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2385 reg = CN23XX_VF_SLI_IQ_SIZE(i); 2386 len += sprintf(s + len, 2387 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2388 reg, i, (u64)octeon_read_csr64(oct, reg)); 2389 } 2390 2391 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2392 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2393 len += sprintf(s + len, 2394 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2395 reg, i, (u64)octeon_read_csr64(oct, reg)); 2396 } 2397 2398 return len; 2399 } 2400 2401 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) 2402 { 2403 u32 reg; 2404 int i, len = 0; 2405 2406 /* PCI Window Registers */ 2407 2408 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2409 reg = CN6XXX_WIN_WR_ADDR_LO; 2410 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n", 2411 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg)); 2412 reg = CN6XXX_WIN_WR_ADDR_HI; 2413 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n", 2414 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg)); 2415 reg = CN6XXX_WIN_RD_ADDR_LO; 2416 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n", 2417 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg)); 2418 reg = CN6XXX_WIN_RD_ADDR_HI; 2419 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n", 2420 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg)); 2421 reg = CN6XXX_WIN_WR_DATA_LO; 2422 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n", 2423 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg)); 2424 reg = CN6XXX_WIN_WR_DATA_HI; 2425 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n", 2426 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg)); 2427 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n", 2428 CN6XXX_WIN_WR_MASK_REG, 2429 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG)); 2430 2431 /* PCI Interrupt Register */ 2432 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n", 2433 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct, 2434 CN6XXX_SLI_INT_ENB64_PORT0)); 2435 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n", 2436 CN6XXX_SLI_INT_ENB64_PORT1, 2437 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1)); 2438 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64, 2439 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64)); 2440 2441 /* PCI Output queue registers */ 2442 for (i = 0; i < oct->num_oqs; i++) { 2443 reg = CN6XXX_SLI_OQ_PKTS_SENT(i); 2444 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n", 2445 reg, i, octeon_read_csr(oct, reg)); 2446 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i); 2447 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n", 2448 reg, i, octeon_read_csr(oct, reg)); 2449 } 2450 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS; 2451 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n", 2452 reg, octeon_read_csr(oct, reg)); 2453 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME; 2454 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n", 2455 reg, octeon_read_csr(oct, reg)); 2456 2457 /* PCI Input queue registers */ 2458 for (i = 0; i <= 3; i++) { 2459 u32 reg; 2460 2461 reg = CN6XXX_SLI_IQ_DOORBELL(i); 2462 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n", 2463 reg, i, octeon_read_csr(oct, reg)); 2464 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i); 2465 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n", 2466 reg, i, octeon_read_csr(oct, reg)); 2467 } 2468 2469 /* PCI DMA registers */ 2470 2471 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n", 2472 CN6XXX_DMA_CNT(0), 2473 octeon_read_csr(oct, CN6XXX_DMA_CNT(0))); 2474 reg = CN6XXX_DMA_PKT_INT_LEVEL(0); 2475 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n", 2476 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg)); 2477 reg = CN6XXX_DMA_TIME_INT_LEVEL(0); 2478 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n", 2479 CN6XXX_DMA_TIME_INT_LEVEL(0), 2480 octeon_read_csr(oct, reg)); 2481 2482 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n", 2483 CN6XXX_DMA_CNT(1), 2484 octeon_read_csr(oct, CN6XXX_DMA_CNT(1))); 2485 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 2486 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n", 2487 CN6XXX_DMA_PKT_INT_LEVEL(1), 2488 octeon_read_csr(oct, reg)); 2489 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 2490 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n", 2491 CN6XXX_DMA_TIME_INT_LEVEL(1), 2492 octeon_read_csr(oct, reg)); 2493 2494 /* PCI Index registers */ 2495 2496 len += sprintf(s + len, "\n"); 2497 2498 for (i = 0; i < 16; i++) { 2499 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port)); 2500 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n", 2501 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg); 2502 } 2503 2504 return len; 2505 } 2506 2507 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct) 2508 { 2509 u32 val; 2510 int i, len = 0; 2511 2512 /* PCI CONFIG Registers */ 2513 2514 len += sprintf(s + len, 2515 "\n\t Octeon Config space Registers\n\n"); 2516 2517 for (i = 0; i <= 13; i++) { 2518 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 2519 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 2520 (i * 4), i, val); 2521 } 2522 2523 for (i = 30; i <= 34; i++) { 2524 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 2525 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 2526 (i * 4), i, val); 2527 } 2528 2529 return len; 2530 } 2531 2532 /* Return register dump user app. */ 2533 static void lio_get_regs(struct net_device *dev, 2534 struct ethtool_regs *regs, void *regbuf) 2535 { 2536 struct lio *lio = GET_LIO(dev); 2537 int len = 0; 2538 struct octeon_device *oct = lio->oct_dev; 2539 2540 regs->version = OCT_ETHTOOL_REGSVER; 2541 2542 switch (oct->chip_id) { 2543 case OCTEON_CN23XX_PF_VID: 2544 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX); 2545 len += cn23xx_read_csr_reg(regbuf + len, oct); 2546 break; 2547 case OCTEON_CN23XX_VF_VID: 2548 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF); 2549 len += cn23xx_vf_read_csr_reg(regbuf + len, oct); 2550 break; 2551 case OCTEON_CN68XX: 2552 case OCTEON_CN66XX: 2553 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); 2554 len += cn6xxx_read_csr_reg(regbuf + len, oct); 2555 len += cn6xxx_read_config_reg(regbuf + len, oct); 2556 break; 2557 default: 2558 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n", 2559 __func__, oct->chip_id); 2560 } 2561 } 2562 2563 static u32 lio_get_priv_flags(struct net_device *netdev) 2564 { 2565 struct lio *lio = GET_LIO(netdev); 2566 2567 return lio->oct_dev->priv_flags; 2568 } 2569 2570 static int lio_set_priv_flags(struct net_device *netdev, u32 flags) 2571 { 2572 struct lio *lio = GET_LIO(netdev); 2573 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); 2574 2575 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, 2576 intr_by_tx_bytes); 2577 return 0; 2578 } 2579 2580 static const struct ethtool_ops lio_ethtool_ops = { 2581 .get_link_ksettings = lio_get_link_ksettings, 2582 .get_link = ethtool_op_get_link, 2583 .get_drvinfo = lio_get_drvinfo, 2584 .get_ringparam = lio_ethtool_get_ringparam, 2585 .get_channels = lio_ethtool_get_channels, 2586 .set_phys_id = lio_set_phys_id, 2587 .get_eeprom_len = lio_get_eeprom_len, 2588 .get_eeprom = lio_get_eeprom, 2589 .get_strings = lio_get_strings, 2590 .get_ethtool_stats = lio_get_ethtool_stats, 2591 .get_pauseparam = lio_get_pauseparam, 2592 .set_pauseparam = lio_set_pauseparam, 2593 .get_regs_len = lio_get_regs_len, 2594 .get_regs = lio_get_regs, 2595 .get_msglevel = lio_get_msglevel, 2596 .set_msglevel = lio_set_msglevel, 2597 .get_sset_count = lio_get_sset_count, 2598 .get_coalesce = lio_get_intr_coalesce, 2599 .set_coalesce = lio_set_intr_coalesce, 2600 .get_priv_flags = lio_get_priv_flags, 2601 .set_priv_flags = lio_set_priv_flags, 2602 .get_ts_info = lio_get_ts_info, 2603 }; 2604 2605 static const struct ethtool_ops lio_vf_ethtool_ops = { 2606 .get_link_ksettings = lio_get_link_ksettings, 2607 .get_link = ethtool_op_get_link, 2608 .get_drvinfo = lio_get_vf_drvinfo, 2609 .get_ringparam = lio_ethtool_get_ringparam, 2610 .get_channels = lio_ethtool_get_channels, 2611 .get_strings = lio_vf_get_strings, 2612 .get_ethtool_stats = lio_vf_get_ethtool_stats, 2613 .get_regs_len = lio_get_regs_len, 2614 .get_regs = lio_get_regs, 2615 .get_msglevel = lio_get_msglevel, 2616 .set_msglevel = lio_set_msglevel, 2617 .get_sset_count = lio_vf_get_sset_count, 2618 .get_coalesce = lio_get_intr_coalesce, 2619 .set_coalesce = lio_set_intr_coalesce, 2620 .get_priv_flags = lio_get_priv_flags, 2621 .set_priv_flags = lio_set_priv_flags, 2622 .get_ts_info = lio_get_ts_info, 2623 }; 2624 2625 void liquidio_set_ethtool_ops(struct net_device *netdev) 2626 { 2627 struct lio *lio = GET_LIO(netdev); 2628 struct octeon_device *oct = lio->oct_dev; 2629 2630 if (OCTEON_CN23XX_VF(oct)) 2631 netdev->ethtool_ops = &lio_vf_ethtool_ops; 2632 else 2633 netdev->ethtool_ops = &lio_ethtool_ops; 2634 } 2635