1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/netdevice.h> 19 #include <linux/net_tstamp.h> 20 #include <linux/pci.h> 21 #include "liquidio_common.h" 22 #include "octeon_droq.h" 23 #include "octeon_iq.h" 24 #include "response_manager.h" 25 #include "octeon_device.h" 26 #include "octeon_nic.h" 27 #include "octeon_main.h" 28 #include "octeon_network.h" 29 #include "cn66xx_regs.h" 30 #include "cn66xx_device.h" 31 #include "cn23xx_pf_device.h" 32 #include "cn23xx_vf_device.h" 33 34 static int octnet_get_link_stats(struct net_device *netdev); 35 36 struct oct_mdio_cmd_context { 37 int octeon_id; 38 wait_queue_head_t wc; 39 int cond; 40 }; 41 42 struct oct_mdio_cmd_resp { 43 u64 rh; 44 struct oct_mdio_cmd resp; 45 u64 status; 46 }; 47 48 #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp)) 49 50 /* Octeon's interface mode of operation */ 51 enum { 52 INTERFACE_MODE_DISABLED, 53 INTERFACE_MODE_RGMII, 54 INTERFACE_MODE_GMII, 55 INTERFACE_MODE_SPI, 56 INTERFACE_MODE_PCIE, 57 INTERFACE_MODE_XAUI, 58 INTERFACE_MODE_SGMII, 59 INTERFACE_MODE_PICMG, 60 INTERFACE_MODE_NPI, 61 INTERFACE_MODE_LOOP, 62 INTERFACE_MODE_SRIO, 63 INTERFACE_MODE_ILK, 64 INTERFACE_MODE_RXAUI, 65 INTERFACE_MODE_QSGMII, 66 INTERFACE_MODE_AGL, 67 
INTERFACE_MODE_XLAUI, 68 INTERFACE_MODE_XFI, 69 INTERFACE_MODE_10G_KR, 70 INTERFACE_MODE_40G_KR4, 71 INTERFACE_MODE_MIXED, 72 }; 73 74 #define OCT_ETHTOOL_REGDUMP_LEN 4096 75 #define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11) 76 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2) 77 #define OCT_ETHTOOL_REGSVER 1 78 79 /* statistics of PF */ 80 static const char oct_stats_strings[][ETH_GSTRING_LEN] = { 81 "rx_packets", 82 "tx_packets", 83 "rx_bytes", 84 "tx_bytes", 85 "rx_errors", /*jabber_err+l2_err+frame_err */ 86 "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */ 87 "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd + 88 *st->fromwire.dmac_drop + st->fromwire.fw_err_drop 89 */ 90 "tx_dropped", 91 92 "tx_total_sent", 93 "tx_total_fwd", 94 "tx_err_pko", 95 "tx_err_link", 96 "tx_err_drop", 97 98 "tx_tso", 99 "tx_tso_packets", 100 "tx_tso_err", 101 "tx_vxlan", 102 103 "mac_tx_total_pkts", 104 "mac_tx_total_bytes", 105 "mac_tx_mcast_pkts", 106 "mac_tx_bcast_pkts", 107 "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */ 108 "mac_tx_total_collisions", 109 "mac_tx_one_collision", 110 "mac_tx_multi_collison", 111 "mac_tx_max_collision_fail", 112 "mac_tx_max_deferal_fail", 113 "mac_tx_fifo_err", 114 "mac_tx_runts", 115 116 "rx_total_rcvd", 117 "rx_total_fwd", 118 "rx_jabber_err", 119 "rx_l2_err", 120 "rx_frame_err", 121 "rx_err_pko", 122 "rx_err_link", 123 "rx_err_drop", 124 125 "rx_vxlan", 126 "rx_vxlan_err", 127 128 "rx_lro_pkts", 129 "rx_lro_bytes", 130 "rx_total_lro", 131 132 "rx_lro_aborts", 133 "rx_lro_aborts_port", 134 "rx_lro_aborts_seq", 135 "rx_lro_aborts_tsval", 136 "rx_lro_aborts_timer", 137 "rx_fwd_rate", 138 139 "mac_rx_total_rcvd", 140 "mac_rx_bytes", 141 "mac_rx_total_bcst", 142 "mac_rx_total_mcst", 143 "mac_rx_runts", 144 "mac_rx_ctl_packets", 145 "mac_rx_fifo_err", 146 "mac_rx_dma_drop", 147 "mac_rx_fcs_err", 148 149 "link_state_changes", 150 }; 151 152 /* statistics of VF */ 153 static const char 
oct_vf_stats_strings[][ETH_GSTRING_LEN] = { 154 "rx_packets", 155 "tx_packets", 156 "rx_bytes", 157 "tx_bytes", 158 "rx_errors", /* jabber_err + l2_err+frame_err */ 159 "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */ 160 "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */ 161 "tx_dropped", 162 "link_state_changes", 163 }; 164 165 /* statistics of host tx queue */ 166 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { 167 "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/ 168 "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/ 169 "dropped", 170 "iq_busy", 171 "sgentry_sent", 172 173 "fw_instr_posted", 174 "fw_instr_processed", 175 "fw_instr_dropped", 176 "fw_bytes_sent", 177 178 "tso", 179 "vxlan", 180 "txq_restart", 181 }; 182 183 /* statistics of host rx queue */ 184 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { 185 "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */ 186 "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */ 187 "dropped", /*oct->droq[oq_no]->stats.rx_dropped+ 188 *oct->droq[oq_no]->stats.dropped_nodispatch+ 189 *oct->droq[oq_no]->stats.dropped_toomany+ 190 *oct->droq[oq_no]->stats.dropped_nomem 191 */ 192 "dropped_nomem", 193 "dropped_toomany", 194 "fw_dropped", 195 "fw_pkts_received", 196 "fw_bytes_received", 197 "fw_dropped_nodispatch", 198 199 "vxlan", 200 "buffer_alloc_failure", 201 }; 202 203 /* LiquidIO driver private flags */ 204 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { 205 }; 206 207 #define OCTNIC_NCMD_AUTONEG_ON 0x1 208 #define OCTNIC_NCMD_PHY_ON 0x2 209 210 static int lio_get_link_ksettings(struct net_device *netdev, 211 struct ethtool_link_ksettings *ecmd) 212 { 213 struct lio *lio = GET_LIO(netdev); 214 struct octeon_device *oct = lio->oct_dev; 215 struct oct_link_info *linfo; 216 u32 supported, advertising; 217 218 linfo = &lio->linfo; 219 220 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || 221 linfo->link.s.if_mode == 
INTERFACE_MODE_RXAUI || 222 linfo->link.s.if_mode == INTERFACE_MODE_XFI) { 223 ecmd->base.port = PORT_FIBRE; 224 supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | 225 SUPPORTED_Pause); 226 advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); 227 ethtool_convert_legacy_u32_to_link_mode( 228 ecmd->link_modes.supported, supported); 229 ethtool_convert_legacy_u32_to_link_mode( 230 ecmd->link_modes.advertising, advertising); 231 ecmd->base.autoneg = AUTONEG_DISABLE; 232 233 } else { 234 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", 235 linfo->link.s.if_mode); 236 } 237 238 if (linfo->link.s.link_up) { 239 ecmd->base.speed = linfo->link.s.speed; 240 ecmd->base.duplex = linfo->link.s.duplex; 241 } else { 242 ecmd->base.speed = SPEED_UNKNOWN; 243 ecmd->base.duplex = DUPLEX_UNKNOWN; 244 } 245 246 return 0; 247 } 248 249 static void 250 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 251 { 252 struct lio *lio; 253 struct octeon_device *oct; 254 255 lio = GET_LIO(netdev); 256 oct = lio->oct_dev; 257 258 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 259 strcpy(drvinfo->driver, "liquidio"); 260 strcpy(drvinfo->version, LIQUIDIO_VERSION); 261 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 262 ETHTOOL_FWVERS_LEN); 263 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 264 } 265 266 static void 267 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 268 { 269 struct octeon_device *oct; 270 struct lio *lio; 271 272 lio = GET_LIO(netdev); 273 oct = lio->oct_dev; 274 275 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 276 strcpy(drvinfo->driver, "liquidio_vf"); 277 strcpy(drvinfo->version, LIQUIDIO_VERSION); 278 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, 279 ETHTOOL_FWVERS_LEN); 280 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); 281 } 282 283 static void 284 lio_ethtool_get_channels(struct net_device *dev, 285 struct 
ethtool_channels *channel) 286 { 287 struct lio *lio = GET_LIO(dev); 288 struct octeon_device *oct = lio->oct_dev; 289 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; 290 291 if (OCTEON_CN6XXX(oct)) { 292 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 293 294 max_rx = CFG_GET_OQ_MAX_Q(conf6x); 295 max_tx = CFG_GET_IQ_MAX_Q(conf6x); 296 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); 297 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); 298 } else if (OCTEON_CN23XX_PF(oct)) { 299 300 max_rx = oct->sriov_info.num_pf_rings; 301 max_tx = oct->sriov_info.num_pf_rings; 302 rx_count = lio->linfo.num_rxpciq; 303 tx_count = lio->linfo.num_txpciq; 304 } else if (OCTEON_CN23XX_VF(oct)) { 305 max_tx = oct->sriov_info.rings_per_vf; 306 max_rx = oct->sriov_info.rings_per_vf; 307 rx_count = lio->linfo.num_rxpciq; 308 tx_count = lio->linfo.num_txpciq; 309 } 310 311 channel->max_rx = max_rx; 312 channel->max_tx = max_tx; 313 channel->rx_count = rx_count; 314 channel->tx_count = tx_count; 315 } 316 317 static int lio_get_eeprom_len(struct net_device *netdev) 318 { 319 u8 buf[128]; 320 struct lio *lio = GET_LIO(netdev); 321 struct octeon_device *oct_dev = lio->oct_dev; 322 struct octeon_board_info *board_info; 323 int len; 324 325 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 326 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n", 327 board_info->name, board_info->serial_number, 328 board_info->major, board_info->minor); 329 330 return len; 331 } 332 333 static int 334 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 335 u8 *bytes) 336 { 337 struct lio *lio = GET_LIO(netdev); 338 struct octeon_device *oct_dev = lio->oct_dev; 339 struct octeon_board_info *board_info; 340 341 if (eeprom->offset) 342 return -EINVAL; 343 344 eeprom->magic = oct_dev->pci_dev->vendor; 345 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); 346 sprintf((char *)bytes, 347 "boardname:%s serialnum:%s maj:%lld 
min:%lld\n", 348 board_info->name, board_info->serial_number, 349 board_info->major, board_info->minor); 350 351 return 0; 352 } 353 354 static int octnet_gpio_access(struct net_device *netdev, int addr, int val) 355 { 356 struct lio *lio = GET_LIO(netdev); 357 struct octeon_device *oct = lio->oct_dev; 358 struct octnic_ctrl_pkt nctrl; 359 int ret = 0; 360 361 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 362 363 nctrl.ncmd.u64 = 0; 364 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; 365 nctrl.ncmd.s.param1 = addr; 366 nctrl.ncmd.s.param2 = val; 367 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 368 nctrl.wait_time = 100; 369 nctrl.netpndev = (u64)netdev; 370 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 371 372 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 373 if (ret < 0) { 374 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 375 return -EINVAL; 376 } 377 378 return 0; 379 } 380 381 static int octnet_id_active(struct net_device *netdev, int val) 382 { 383 struct lio *lio = GET_LIO(netdev); 384 struct octeon_device *oct = lio->oct_dev; 385 struct octnic_ctrl_pkt nctrl; 386 int ret = 0; 387 388 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 389 390 nctrl.ncmd.u64 = 0; 391 nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; 392 nctrl.ncmd.s.param1 = val; 393 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 394 nctrl.wait_time = 100; 395 nctrl.netpndev = (u64)netdev; 396 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 397 398 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 399 if (ret < 0) { 400 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); 401 return -EINVAL; 402 } 403 404 return 0; 405 } 406 407 /* Callback for when mdio command response arrives 408 */ 409 static void octnet_mdio_resp_callback(struct octeon_device *oct, 410 u32 status, 411 void *buf) 412 { 413 struct oct_mdio_cmd_context *mdio_cmd_ctx; 414 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 415 416 mdio_cmd_ctx = (struct oct_mdio_cmd_context 
*)sc->ctxptr; 417 418 oct = lio_get_device(mdio_cmd_ctx->octeon_id); 419 if (status) { 420 dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n", 421 CVM_CAST64(status)); 422 WRITE_ONCE(mdio_cmd_ctx->cond, -1); 423 } else { 424 WRITE_ONCE(mdio_cmd_ctx->cond, 1); 425 } 426 wake_up_interruptible(&mdio_cmd_ctx->wc); 427 } 428 429 /* This routine provides PHY access routines for 430 * mdio clause45 . 431 */ 432 static int 433 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) 434 { 435 struct octeon_device *oct_dev = lio->oct_dev; 436 struct octeon_soft_command *sc; 437 struct oct_mdio_cmd_resp *mdio_cmd_rsp; 438 struct oct_mdio_cmd_context *mdio_cmd_ctx; 439 struct oct_mdio_cmd *mdio_cmd; 440 int retval = 0; 441 442 sc = (struct octeon_soft_command *) 443 octeon_alloc_soft_command(oct_dev, 444 sizeof(struct oct_mdio_cmd), 445 sizeof(struct oct_mdio_cmd_resp), 446 sizeof(struct oct_mdio_cmd_context)); 447 448 if (!sc) 449 return -ENOMEM; 450 451 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; 452 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; 453 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; 454 455 WRITE_ONCE(mdio_cmd_ctx->cond, 0); 456 mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); 457 mdio_cmd->op = op; 458 mdio_cmd->mdio_addr = loc; 459 if (op) 460 mdio_cmd->value1 = *value; 461 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); 462 463 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 464 465 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 466 0, 0, 0); 467 468 sc->wait_time = 1000; 469 sc->callback = octnet_mdio_resp_callback; 470 sc->callback_arg = sc; 471 472 init_waitqueue_head(&mdio_cmd_ctx->wc); 473 474 retval = octeon_send_soft_command(oct_dev, sc); 475 476 if (retval == IQ_SEND_FAILED) { 477 dev_err(&oct_dev->pci_dev->dev, 478 "octnet_mdio45_access instruction failed status: %x\n", 479 retval); 480 retval = -EBUSY; 481 } else { 482 /* Sleep on a wait queue till 
the cond flag indicates that the 483 * response arrived 484 */ 485 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond); 486 retval = mdio_cmd_rsp->status; 487 if (retval) { 488 dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n"); 489 retval = -EBUSY; 490 } else { 491 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), 492 sizeof(struct oct_mdio_cmd) / 8); 493 494 if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { 495 if (!op) 496 *value = mdio_cmd_rsp->resp.value1; 497 } else { 498 retval = -EINVAL; 499 } 500 } 501 } 502 503 octeon_free_soft_command(oct_dev, sc); 504 505 return retval; 506 } 507 508 static int lio_set_phys_id(struct net_device *netdev, 509 enum ethtool_phys_id_state state) 510 { 511 struct lio *lio = GET_LIO(netdev); 512 struct octeon_device *oct = lio->oct_dev; 513 int value, ret; 514 515 switch (state) { 516 case ETHTOOL_ID_ACTIVE: 517 if (oct->chip_id == OCTEON_CN66XX) { 518 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 519 VITESSE_PHY_GPIO_DRIVEON); 520 return 2; 521 522 } else if (oct->chip_id == OCTEON_CN68XX) { 523 /* Save the current LED settings */ 524 ret = octnet_mdio45_access(lio, 0, 525 LIO68XX_LED_BEACON_ADDR, 526 &lio->phy_beacon_val); 527 if (ret) 528 return ret; 529 530 ret = octnet_mdio45_access(lio, 0, 531 LIO68XX_LED_CTRL_ADDR, 532 &lio->led_ctrl_val); 533 if (ret) 534 return ret; 535 536 /* Configure Beacon values */ 537 value = LIO68XX_LED_BEACON_CFGON; 538 ret = octnet_mdio45_access(lio, 1, 539 LIO68XX_LED_BEACON_ADDR, 540 &value); 541 if (ret) 542 return ret; 543 544 value = LIO68XX_LED_CTRL_CFGON; 545 ret = octnet_mdio45_access(lio, 1, 546 LIO68XX_LED_CTRL_ADDR, 547 &value); 548 if (ret) 549 return ret; 550 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 551 octnet_id_active(netdev, LED_IDENTIFICATION_ON); 552 553 /* returns 0 since updates are asynchronous */ 554 return 0; 555 } else { 556 return -EINVAL; 557 } 558 break; 559 560 case ETHTOOL_ID_ON: 561 if (oct->chip_id == OCTEON_CN66XX) { 562 
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 563 VITESSE_PHY_GPIO_HIGH); 564 565 } else if (oct->chip_id == OCTEON_CN68XX) { 566 return -EINVAL; 567 } else { 568 return -EINVAL; 569 } 570 break; 571 572 case ETHTOOL_ID_OFF: 573 if (oct->chip_id == OCTEON_CN66XX) 574 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 575 VITESSE_PHY_GPIO_LOW); 576 else if (oct->chip_id == OCTEON_CN68XX) 577 return -EINVAL; 578 else 579 return -EINVAL; 580 581 break; 582 583 case ETHTOOL_ID_INACTIVE: 584 if (oct->chip_id == OCTEON_CN66XX) { 585 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, 586 VITESSE_PHY_GPIO_DRIVEOFF); 587 } else if (oct->chip_id == OCTEON_CN68XX) { 588 /* Restore LED settings */ 589 ret = octnet_mdio45_access(lio, 1, 590 LIO68XX_LED_CTRL_ADDR, 591 &lio->led_ctrl_val); 592 if (ret) 593 return ret; 594 595 ret = octnet_mdio45_access(lio, 1, 596 LIO68XX_LED_BEACON_ADDR, 597 &lio->phy_beacon_val); 598 if (ret) 599 return ret; 600 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { 601 octnet_id_active(netdev, LED_IDENTIFICATION_OFF); 602 603 return 0; 604 } else { 605 return -EINVAL; 606 } 607 break; 608 609 default: 610 return -EINVAL; 611 } 612 613 return 0; 614 } 615 616 static void 617 lio_ethtool_get_ringparam(struct net_device *netdev, 618 struct ethtool_ringparam *ering) 619 { 620 struct lio *lio = GET_LIO(netdev); 621 struct octeon_device *oct = lio->oct_dev; 622 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, 623 rx_pending = 0; 624 625 if (OCTEON_CN6XXX(oct)) { 626 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 627 628 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS; 629 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; 630 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); 631 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); 632 } else if (OCTEON_CN23XX_PF(oct)) { 633 struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); 634 635 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; 636 rx_max_pending = 
CN23XX_MAX_OQ_DESCRIPTORS; 637 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx); 638 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx); 639 } 640 641 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) { 642 ering->rx_pending = 0; 643 ering->rx_max_pending = 0; 644 ering->rx_mini_pending = 0; 645 ering->rx_jumbo_pending = rx_pending; 646 ering->rx_mini_max_pending = 0; 647 ering->rx_jumbo_max_pending = rx_max_pending; 648 } else { 649 ering->rx_pending = rx_pending; 650 ering->rx_max_pending = rx_max_pending; 651 ering->rx_mini_pending = 0; 652 ering->rx_jumbo_pending = 0; 653 ering->rx_mini_max_pending = 0; 654 ering->rx_jumbo_max_pending = 0; 655 } 656 657 ering->tx_pending = tx_pending; 658 ering->tx_max_pending = tx_max_pending; 659 } 660 661 static u32 lio_get_msglevel(struct net_device *netdev) 662 { 663 struct lio *lio = GET_LIO(netdev); 664 665 return lio->msg_enable; 666 } 667 668 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl) 669 { 670 struct lio *lio = GET_LIO(netdev); 671 672 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) { 673 if (msglvl & NETIF_MSG_HW) 674 liquidio_set_feature(netdev, 675 OCTNET_CMD_VERBOSE_ENABLE, 0); 676 else 677 liquidio_set_feature(netdev, 678 OCTNET_CMD_VERBOSE_DISABLE, 0); 679 } 680 681 lio->msg_enable = msglvl; 682 } 683 684 static void 685 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 686 { 687 /* Notes: Not supporting any auto negotiation in these 688 * drivers. Just report pause frame support. 689 */ 690 struct lio *lio = GET_LIO(netdev); 691 struct octeon_device *oct = lio->oct_dev; 692 693 pause->autoneg = 0; 694 695 pause->tx_pause = oct->tx_pause; 696 pause->rx_pause = oct->rx_pause; 697 } 698 699 static int 700 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 701 { 702 /* Notes: Not supporting any auto negotiation in these 703 * drivers. 
704 */ 705 struct lio *lio = GET_LIO(netdev); 706 struct octeon_device *oct = lio->oct_dev; 707 struct octnic_ctrl_pkt nctrl; 708 struct oct_link_info *linfo = &lio->linfo; 709 710 int ret = 0; 711 712 if (oct->chip_id != OCTEON_CN23XX_PF_VID) 713 return -EINVAL; 714 715 if (linfo->link.s.duplex == 0) { 716 /*no flow control for half duplex*/ 717 if (pause->rx_pause || pause->tx_pause) 718 return -EINVAL; 719 } 720 721 /*do not support autoneg of link flow control*/ 722 if (pause->autoneg == AUTONEG_ENABLE) 723 return -EINVAL; 724 725 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 726 727 nctrl.ncmd.u64 = 0; 728 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL; 729 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 730 nctrl.wait_time = 100; 731 nctrl.netpndev = (u64)netdev; 732 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 733 734 if (pause->rx_pause) { 735 /*enable rx pause*/ 736 nctrl.ncmd.s.param1 = 1; 737 } else { 738 /*disable rx pause*/ 739 nctrl.ncmd.s.param1 = 0; 740 } 741 742 if (pause->tx_pause) { 743 /*enable tx pause*/ 744 nctrl.ncmd.s.param2 = 1; 745 } else { 746 /*disable tx pause*/ 747 nctrl.ncmd.s.param2 = 0; 748 } 749 750 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 751 if (ret < 0) { 752 dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n"); 753 return -EINVAL; 754 } 755 756 oct->rx_pause = pause->rx_pause; 757 oct->tx_pause = pause->tx_pause; 758 759 return 0; 760 } 761 762 static void 763 lio_get_ethtool_stats(struct net_device *netdev, 764 struct ethtool_stats *stats __attribute__((unused)), 765 u64 *data) 766 { 767 struct lio *lio = GET_LIO(netdev); 768 struct octeon_device *oct_dev = lio->oct_dev; 769 struct net_device_stats *netstats = &netdev->stats; 770 int i = 0, j; 771 772 netdev->netdev_ops->ndo_get_stats(netdev); 773 octnet_get_link_stats(netdev); 774 775 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ 776 data[i++] = CVM_CAST64(netstats->rx_packets); 777 /*sum of oct->instr_queue[iq_no]->stats.tx_done */ 778 
data[i++] = CVM_CAST64(netstats->tx_packets); 779 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ 780 data[i++] = CVM_CAST64(netstats->rx_bytes); 781 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 782 data[i++] = CVM_CAST64(netstats->tx_bytes); 783 data[i++] = CVM_CAST64(netstats->rx_errors); 784 data[i++] = CVM_CAST64(netstats->tx_errors); 785 /*sum of oct->droq[oq_no]->stats->rx_dropped + 786 *oct->droq[oq_no]->stats->dropped_nodispatch + 787 *oct->droq[oq_no]->stats->dropped_toomany + 788 *oct->droq[oq_no]->stats->dropped_nomem 789 */ 790 data[i++] = CVM_CAST64(netstats->rx_dropped); 791 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 792 data[i++] = CVM_CAST64(netstats->tx_dropped); 793 794 /* firmware tx stats */ 795 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 796 *fromhost.fw_total_sent 797 */ 798 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); 799 /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ 800 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); 801 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ 802 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); 803 /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ 804 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); 805 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 806 *fw_err_drop 807 */ 808 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); 809 810 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ 811 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); 812 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 813 *fw_tso_fwd 814 */ 815 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); 816 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 
817 *fw_err_tso 818 */ 819 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); 820 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 821 *fw_tx_vxlan 822 */ 823 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); 824 825 /* mac tx statistics */ 826 /*CVMX_BGXX_CMRX_TX_STAT5 */ 827 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); 828 /*CVMX_BGXX_CMRX_TX_STAT4 */ 829 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); 830 /*CVMX_BGXX_CMRX_TX_STAT15 */ 831 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); 832 /*CVMX_BGXX_CMRX_TX_STAT14 */ 833 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); 834 /*CVMX_BGXX_CMRX_TX_STAT17 */ 835 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); 836 /*CVMX_BGXX_CMRX_TX_STAT0 */ 837 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); 838 /*CVMX_BGXX_CMRX_TX_STAT3 */ 839 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); 840 /*CVMX_BGXX_CMRX_TX_STAT2 */ 841 data[i++] = 842 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); 843 /*CVMX_BGXX_CMRX_TX_STAT0 */ 844 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); 845 /*CVMX_BGXX_CMRX_TX_STAT1 */ 846 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); 847 /*CVMX_BGXX_CMRX_TX_STAT16 */ 848 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); 849 /*CVMX_BGXX_CMRX_TX_STAT6 */ 850 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); 851 852 /* RX firmware stats */ 853 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 854 *fw_total_rcvd 855 */ 856 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); 857 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 
858 *fw_total_fwd 859 */ 860 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); 861 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ 862 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); 863 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ 864 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); 865 /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ 866 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); 867 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 868 *fw_err_pko 869 */ 870 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); 871 /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ 872 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); 873 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 874 *fromwire.fw_err_drop 875 */ 876 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); 877 878 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 879 *fromwire.fw_rx_vxlan 880 */ 881 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan); 882 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 883 *fromwire.fw_rx_vxlan_err 884 */ 885 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err); 886 887 /* LRO */ 888 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 889 *fw_lro_pkts 890 */ 891 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); 892 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 
893 *fw_lro_octs 894 */ 895 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); 896 /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ 897 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); 898 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 899 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); 900 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 901 *fw_lro_aborts_port 902 */ 903 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port); 904 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 905 *fw_lro_aborts_seq 906 */ 907 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq); 908 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 909 *fw_lro_aborts_tsval 910 */ 911 data[i++] = 912 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval); 913 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 914 *fw_lro_aborts_timer 915 */ 916 /* intrmod: packet forward rate */ 917 data[i++] = 918 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer); 919 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ 920 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate); 921 922 /* mac: link-level stats */ 923 /*CVMX_BGXX_CMRX_RX_STAT0 */ 924 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd); 925 /*CVMX_BGXX_CMRX_RX_STAT1 */ 926 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd); 927 /*CVMX_PKI_STATX_STAT5 */ 928 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst); 929 /*CVMX_PKI_STATX_STAT5 */ 930 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst); 931 /*wqe->word2.err_code or wqe->word2.err_level */ 932 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts); 933 /*CVMX_BGXX_CMRX_RX_STAT2 */ 934 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd); 935 /*CVMX_BGXX_CMRX_RX_STAT6 */ 936 data[i++] = 
CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err); 937 /*CVMX_BGXX_CMRX_RX_STAT4 */ 938 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop); 939 /*wqe->word2.err_code or wqe->word2.err_level */ 940 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err); 941 /*lio->link_changes*/ 942 data[i++] = CVM_CAST64(lio->link_changes); 943 944 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { 945 if (!(oct_dev->io_qmask.iq & BIT_ULL(j))) 946 continue; 947 /*packets to network port*/ 948 /*# of packets tx to network */ 949 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); 950 /*# of bytes tx to network */ 951 data[i++] = 952 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); 953 /*# of packets dropped */ 954 data[i++] = 955 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); 956 /*# of tx fails due to queue full */ 957 data[i++] = 958 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); 959 /*XXX gather entries sent */ 960 data[i++] = 961 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); 962 963 /*instruction to firmware: data and control */ 964 /*# of instructions to the queue */ 965 data[i++] = 966 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); 967 /*# of instructions processed */ 968 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]-> 969 stats.instr_processed); 970 /*# of instructions could not be processed */ 971 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]-> 972 stats.instr_dropped); 973 /*bytes sent through the queue */ 974 data[i++] = 975 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); 976 977 /*tso request*/ 978 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 979 /*vxlan request*/ 980 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); 981 /*txq restart*/ 982 data[i++] = 983 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); 984 } 985 986 /* RX */ 987 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) { 988 if (!(oct_dev->io_qmask.oq & BIT_ULL(j))) 989 
			continue;

		/*packets send to TCP/IP network stack */
		/*# of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/*# of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/*# of packets dropped (sum of the three droq drop counters) */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/*control and data path*/
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

/* Fill the ethtool -S counters for a VF: netdev-level totals first, then
 * per-IQ (tx) and per-OQ (rx) queue statistics.  The order must match
 * oct_vf_stats_strings / oct_iq_stats_strings / oct_droq_stats_strings.
 */
static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct net_device_stats *netstats = &netdev->stats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	/* Refresh netdev->stats before snapshotting it. */
	netdev->netdev_ops->ndo_get_stats(netdev);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	/* TX: the VF only owns the rings listed in linfo.txpciq, so walk
	 * that list rather than the whole-device queue mask the PF uses.
	 */
	for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] =
		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions could not be processed */
		data[i++] =
		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.bytes_sent);
		/* tso request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restart */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX: per output-queue (droq) counters for this VF's rings. */
	for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets send to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_bytes_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
		    CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
		    CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

/* Emit the private-flag names (ETH_SS_PRIV_FLAGS stringset).  Only the
 * CN23xx parts expose private flags; CN66xx/CN68xx emit nothing.
 */
static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	int i;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
			sprintf(data, "%s", oct_priv_flags_strings[i]);
			data += ETH_GSTRING_LEN;
		}
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		break;
	}
}

/* ethtool_ops.get_strings for the PF: statistic names, then per-queue
 * "tx-%d-…" and "rx-%d-…" names, or the private-flag names.
 */
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int
num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		/* Fixed PF stat names first, in oct_stats_strings order. */
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		/* Per-IQ names; only queues present in io_qmask.iq count,
		 * matching the data fill in lio_get_ethtool_stats().
		 */
		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		/* Per-OQ names, gated by io_qmask.oq the same way. */
		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

/* ethtool_ops.get_strings for the VF; identical layout to the PF version
 * but starting from oct_vf_stats_strings.
 */
static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
			       u8 *data)
{
	int num_iq_stats, num_oq_stats, i, j;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_vf_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

/* Number of private flags, or -EOPNOTSUPP on chips without any. */
static int lio_get_priv_flags_ss_count(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return ARRAY_SIZE(oct_priv_flags_strings);
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		return -EOPNOTSUPP;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EOPNOTSUPP;
	}
}

/* ethtool_ops.get_sset_count for the PF.  Counts use num_iqs/num_oqs,
 * which must agree with the queues emitted by lio_get_strings().
 */
static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool_ops.get_sset_count for the VF (VF stat-name table). */
static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_vf_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool_ops.get_coalesce: report the current interrupt-moderation
 * settings from oct->intrmod (CN23xx) or the chip config (CN6xxx).
 */
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		/* Fixed thresholds are only reported while adaptive
		 * (intrmod) moderation is off for that direction.
		 */
		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
			intr_coal->rx_max_coalesced_frames =
				intrmod_cfg->rx_frames;
		}
		if (!intrmod_cfg->tx_enable)
			intr_coal->tx_max_coalesced_frames =
				intrmod_cfg->tx_frames;
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		/* CN6xxx reports the fill threshold of the first tx queue. */
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	/* Adaptive rx parameters, reported whenever adaptive rx is on. */
	if (intrmod_cfg->rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg->rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg->check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg->maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg->minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg->rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg->rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg->rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg->rx_mincnt_trigger;
	}
	/* Adaptive tx parameters exist only on CN23xx. */
	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
	    (intrmod_cfg->tx_enable)) {
		intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
		intr_coal->tx_max_coalesced_frames_high =
			intrmod_cfg->tx_maxcnt_trigger;
		intr_coal->tx_max_coalesced_frames_low =
			intrmod_cfg->tx_mincnt_trigger;
	}
	return 0;
}

/* Callback for the OPCODE_NIC_INTRMOD_CFG soft command: log the result
 * and free the soft command.  The submitter does not free it on the
 * success path, so this callback owns the final free.
 */
static void octnet_intrmod_callback(struct octeon_device *oct_dev,
				    u32 status,
				    void *ptr)
{
	struct oct_intrmod_cmd *cmd = ptr;
	struct octeon_soft_command *sc = cmd->sc;

	/* The device is carried in the command context as well. */
	oct_dev = cmd->oct_dev;

	if (status)
		dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
			CVM_CAST64(status));
	else
		dev_info(&oct_dev->pci_dev->dev,
			 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
			 oct_dev->intrmod.rx_enable);

	octeon_free_soft_command(oct_dev, sc);
}

/* Configure interrupt moderation parameters: copy intr_cfg into a soft
 * command's DMA buffer and send OPCODE_NIC_INTRMOD_CFG to the firmware.
 */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cmd *cmd;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_cmd));

	if (!sc)
		return -ENOMEM;

	cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	/* Swap each 8-byte word before handing the buffer to the device. */
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
	cmd->sc = sc;
	cmd->cfg = cfg;
	cmd->oct_dev = oct_dev;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = cmd;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* On success the soft command is freed by octnet_intrmod_callback. */
	return 0;
}

/* Completion callback for OPCODE_NIC_PORT_STATS: byte-swap the response
 * and copy the firmware/MAC counters into oct_dev->link_stats, then wake
 * the waiter.  resp->status is set to 1 on success, -1 on error/timeout.
 */
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
		(struct oct_nic_stats_resp *)sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl =
		(struct oct_nic_stats_ctrl *)sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;

	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times lRO of packet aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision*/
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collision*/
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		resp->status = -1;
	}
	complete(&ctrl->complete);
}

/* Request the firmware's link statistics (OPCODE_NIC_PORT_STATS) and wait
 * up to one second for the response; on success the counters land in
 * oct_dev->link_stats via octnet_nic_stats_callback().
 */
static int octnet_get_link_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	struct octeon_soft_command *sc;
	struct oct_nic_stats_ctrl *ctrl;
	struct oct_nic_stats_resp *resp;

	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  sizeof(struct octnic_ctrl_pkt));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
	ctrl->netdev = netdev;
	init_completion(&ctrl->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	sc->callback = octnet_nic_stats_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500;	/*in milli seconds*/

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));

	if (resp->status != 1) {
octeon_free_soft_command(oct_dev, sc); 1578 1579 return -EINVAL; 1580 } 1581 1582 octeon_free_soft_command(oct_dev, sc); 1583 1584 return 0; 1585 } 1586 1587 /* Enable/Disable auto interrupt Moderation */ 1588 static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce 1589 *intr_coal) 1590 { 1591 int ret = 0; 1592 struct octeon_device *oct = lio->oct_dev; 1593 struct oct_intrmod_cfg *intrmod_cfg; 1594 1595 intrmod_cfg = &oct->intrmod; 1596 1597 if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) { 1598 if (intr_coal->rate_sample_interval) 1599 intrmod_cfg->check_intrvl = 1600 intr_coal->rate_sample_interval; 1601 else 1602 intrmod_cfg->check_intrvl = 1603 LIO_INTRMOD_CHECK_INTERVAL; 1604 1605 if (intr_coal->pkt_rate_high) 1606 intrmod_cfg->maxpkt_ratethr = 1607 intr_coal->pkt_rate_high; 1608 else 1609 intrmod_cfg->maxpkt_ratethr = 1610 LIO_INTRMOD_MAXPKT_RATETHR; 1611 1612 if (intr_coal->pkt_rate_low) 1613 intrmod_cfg->minpkt_ratethr = 1614 intr_coal->pkt_rate_low; 1615 else 1616 intrmod_cfg->minpkt_ratethr = 1617 LIO_INTRMOD_MINPKT_RATETHR; 1618 } 1619 if (oct->intrmod.rx_enable) { 1620 if (intr_coal->rx_max_coalesced_frames_high) 1621 intrmod_cfg->rx_maxcnt_trigger = 1622 intr_coal->rx_max_coalesced_frames_high; 1623 else 1624 intrmod_cfg->rx_maxcnt_trigger = 1625 LIO_INTRMOD_RXMAXCNT_TRIGGER; 1626 1627 if (intr_coal->rx_coalesce_usecs_high) 1628 intrmod_cfg->rx_maxtmr_trigger = 1629 intr_coal->rx_coalesce_usecs_high; 1630 else 1631 intrmod_cfg->rx_maxtmr_trigger = 1632 LIO_INTRMOD_RXMAXTMR_TRIGGER; 1633 1634 if (intr_coal->rx_coalesce_usecs_low) 1635 intrmod_cfg->rx_mintmr_trigger = 1636 intr_coal->rx_coalesce_usecs_low; 1637 else 1638 intrmod_cfg->rx_mintmr_trigger = 1639 LIO_INTRMOD_RXMINTMR_TRIGGER; 1640 1641 if (intr_coal->rx_max_coalesced_frames_low) 1642 intrmod_cfg->rx_mincnt_trigger = 1643 intr_coal->rx_max_coalesced_frames_low; 1644 else 1645 intrmod_cfg->rx_mincnt_trigger = 1646 LIO_INTRMOD_RXMINCNT_TRIGGER; 1647 } 1648 if 
(oct->intrmod.tx_enable) { 1649 if (intr_coal->tx_max_coalesced_frames_high) 1650 intrmod_cfg->tx_maxcnt_trigger = 1651 intr_coal->tx_max_coalesced_frames_high; 1652 else 1653 intrmod_cfg->tx_maxcnt_trigger = 1654 LIO_INTRMOD_TXMAXCNT_TRIGGER; 1655 if (intr_coal->tx_max_coalesced_frames_low) 1656 intrmod_cfg->tx_mincnt_trigger = 1657 intr_coal->tx_max_coalesced_frames_low; 1658 else 1659 intrmod_cfg->tx_mincnt_trigger = 1660 LIO_INTRMOD_TXMINCNT_TRIGGER; 1661 } 1662 1663 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); 1664 1665 return ret; 1666 } 1667 1668 static int 1669 oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) 1670 { 1671 struct octeon_device *oct = lio->oct_dev; 1672 u32 rx_max_coalesced_frames; 1673 1674 /* Config Cnt based interrupt values */ 1675 switch (oct->chip_id) { 1676 case OCTEON_CN68XX: 1677 case OCTEON_CN66XX: { 1678 struct octeon_cn6xxx *cn6xxx = 1679 (struct octeon_cn6xxx *)oct->chip; 1680 1681 if (!intr_coal->rx_max_coalesced_frames) 1682 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; 1683 else 1684 rx_max_coalesced_frames = 1685 intr_coal->rx_max_coalesced_frames; 1686 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, 1687 rx_max_coalesced_frames); 1688 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); 1689 break; 1690 } 1691 case OCTEON_CN23XX_PF_VID: { 1692 int q_no; 1693 1694 if (!intr_coal->rx_max_coalesced_frames) 1695 rx_max_coalesced_frames = oct->intrmod.rx_frames; 1696 else 1697 rx_max_coalesced_frames = 1698 intr_coal->rx_max_coalesced_frames; 1699 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 1700 q_no += oct->sriov_info.pf_srn; 1701 octeon_write_csr64( 1702 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 1703 (octeon_read_csr64( 1704 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & 1705 (0x3fffff00000000UL)) | 1706 rx_max_coalesced_frames); 1707 /*consider setting resend bit*/ 1708 } 1709 oct->intrmod.rx_frames = rx_max_coalesced_frames; 1710 break; 1711 } 1712 case OCTEON_CN23XX_VF_VID: { 1713 int q_no; 
1714 1715 if (!intr_coal->rx_max_coalesced_frames) 1716 rx_max_coalesced_frames = oct->intrmod.rx_frames; 1717 else 1718 rx_max_coalesced_frames = 1719 intr_coal->rx_max_coalesced_frames; 1720 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 1721 octeon_write_csr64( 1722 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 1723 (octeon_read_csr64( 1724 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & 1725 (0x3fffff00000000UL)) | 1726 rx_max_coalesced_frames); 1727 /* consider writing to resend bit here */ 1728 } 1729 oct->intrmod.rx_frames = rx_max_coalesced_frames; 1730 break; 1731 } 1732 default: 1733 return -EINVAL; 1734 } 1735 return 0; 1736 } 1737 1738 static int oct_cfg_rx_intrtime(struct lio *lio, 1739 struct ethtool_coalesce *intr_coal) 1740 { 1741 struct octeon_device *oct = lio->oct_dev; 1742 u32 time_threshold, rx_coalesce_usecs; 1743 1744 /* Config Time based interrupt values */ 1745 switch (oct->chip_id) { 1746 case OCTEON_CN68XX: 1747 case OCTEON_CN66XX: { 1748 struct octeon_cn6xxx *cn6xxx = 1749 (struct octeon_cn6xxx *)oct->chip; 1750 if (!intr_coal->rx_coalesce_usecs) 1751 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; 1752 else 1753 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 1754 1755 time_threshold = lio_cn6xxx_get_oq_ticks(oct, 1756 rx_coalesce_usecs); 1757 octeon_write_csr(oct, 1758 CN6XXX_SLI_OQ_INT_LEVEL_TIME, 1759 time_threshold); 1760 1761 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); 1762 break; 1763 } 1764 case OCTEON_CN23XX_PF_VID: { 1765 u64 time_threshold; 1766 int q_no; 1767 1768 if (!intr_coal->rx_coalesce_usecs) 1769 rx_coalesce_usecs = oct->intrmod.rx_usecs; 1770 else 1771 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 1772 time_threshold = 1773 cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 1774 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 1775 q_no += oct->sriov_info.pf_srn; 1776 octeon_write_csr64(oct, 1777 CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), 1778 (oct->intrmod.rx_frames | 1779 (time_threshold << 32))); 1780 /*consider 
writing to resend bit here*/ 1781 } 1782 oct->intrmod.rx_usecs = rx_coalesce_usecs; 1783 break; 1784 } 1785 case OCTEON_CN23XX_VF_VID: { 1786 u64 time_threshold; 1787 int q_no; 1788 1789 if (!intr_coal->rx_coalesce_usecs) 1790 rx_coalesce_usecs = oct->intrmod.rx_usecs; 1791 else 1792 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; 1793 1794 time_threshold = 1795 cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); 1796 for (q_no = 0; q_no < oct->num_oqs; q_no++) { 1797 octeon_write_csr64( 1798 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), 1799 (oct->intrmod.rx_frames | 1800 (time_threshold << 32))); 1801 /* consider setting resend bit */ 1802 } 1803 oct->intrmod.rx_usecs = rx_coalesce_usecs; 1804 break; 1805 } 1806 default: 1807 return -EINVAL; 1808 } 1809 1810 return 0; 1811 } 1812 1813 static int 1814 oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal 1815 __attribute__((unused))) 1816 { 1817 struct octeon_device *oct = lio->oct_dev; 1818 u32 iq_intr_pkt; 1819 void __iomem *inst_cnt_reg; 1820 u64 val; 1821 1822 /* Config Cnt based interrupt values */ 1823 switch (oct->chip_id) { 1824 case OCTEON_CN68XX: 1825 case OCTEON_CN66XX: 1826 break; 1827 case OCTEON_CN23XX_VF_VID: 1828 case OCTEON_CN23XX_PF_VID: { 1829 int q_no; 1830 1831 if (!intr_coal->tx_max_coalesced_frames) 1832 iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD & 1833 CN23XX_PKT_IN_DONE_WMARK_MASK; 1834 else 1835 iq_intr_pkt = intr_coal->tx_max_coalesced_frames & 1836 CN23XX_PKT_IN_DONE_WMARK_MASK; 1837 for (q_no = 0; q_no < oct->num_iqs; q_no++) { 1838 inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg; 1839 val = readq(inst_cnt_reg); 1840 /*clear wmark and count.dont want to write count back*/ 1841 val = (val & 0xFFFF000000000000ULL) | 1842 ((u64)iq_intr_pkt 1843 << CN23XX_PKT_IN_DONE_WMARK_BIT_POS); 1844 writeq(val, inst_cnt_reg); 1845 /*consider setting resend bit*/ 1846 } 1847 oct->intrmod.tx_frames = iq_intr_pkt; 1848 break; 1849 } 1850 default: 1851 return -EINVAL; 1852 } 1853 
	return 0;
}

/* ethtool_ops.set_coalesce: validate and apply interrupt-coalescing
 * settings.  CN6xxx only supports the tx fill threshold here; CN23xx
 * settings are pushed via the oct_cfg_* helpers below.
 */
static int lio_set_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	u32 j, q_no;
	int db_max, db_min;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		db_min = CN6XXX_DB_MIN;
		db_max = CN6XXX_DB_MAX;
		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
			for (j = 0; j < lio->linfo.num_txpciq; j++) {
				q_no = lio->linfo.txpciq[j].s.q_no;
				oct->instr_queue[q_no]->fill_threshold =
					intr_coal->tx_max_coalesced_frames;
			}
		} else {
			dev_err(&oct->pci_dev->dev,
				"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
				intr_coal->tx_max_coalesced_frames, db_min,
				db_max);
			return -EINVAL;
		}
		break;
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		break;
	default:
		return -EINVAL;
	}

	oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
	oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;

	/* NOTE(review): the return value of oct_cfg_adaptive_intr() is
	 * overwritten below without being checked, so a failed intrmod
	 * push is silently ignored when adaptive coalescing is disabled.
	 * Confirm whether that is intended before changing it.
	 */
	ret = oct_cfg_adaptive_intr(lio, intr_coal);

	if (!intr_coal->use_adaptive_rx_coalesce) {
		ret = oct_cfg_rx_intrtime(lio, intr_coal);
		if (ret)
			goto ret_intrmod;

		ret = oct_cfg_rx_intrcnt(lio, intr_coal);
		if (ret)
			goto ret_intrmod;
	}
	if (!intr_coal->use_adaptive_tx_coalesce) {
		ret = oct_cfg_tx_intrcnt(lio, intr_coal);
		if (ret)
			goto ret_intrmod;
	}

	return 0;
ret_intrmod:
	return ret;
}

/* ethtool_ops.get_ts_info: advertise timestamping capabilities and the
 * PHC index.  Hardware timestamping bits are only reported when built
 * with PTP_HARDWARE_TIMESTAMPING.
 */
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	info->so_timestamping =
#ifdef PTP_HARDWARE_TIMESTAMPING
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
#endif
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;

	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

#ifdef PTP_HARDWARE_TIMESTAMPING
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
#endif

	return 0;
}

/* Return register dump len.
 */
static int lio_get_regs_len(struct net_device *dev)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;

	/* Must match the output size of the corresponding *_read_csr_reg()
	 * dump routine for this chip.
	 */
	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
		return OCT_ETHTOOL_REGDUMP_LEN_23XX;
	case OCTEON_CN23XX_VF_VID:
		return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
	default:
		return OCT_ETHTOOL_REGDUMP_LEN;
	}
}

/* Format the CN23xx PF CSR dump into s; returns the number of bytes
 * written.  s must be at least OCT_ETHTOOL_REGDUMP_LEN_23XX bytes.
 */
static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	u8 pf_num = oct->pf_num;
	int len = 0;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	/*0x29030 or 0x29040*/
	reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x27080 or 0x27090*/
	reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
	len +=
	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
		    reg, oct->pcie_port, oct->pf_num,
		    (u64)octeon_read_csr64(oct, reg));

	/*0x27000 or 0x27010*/
	reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
	len +=
	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
		    reg, oct->pcie_port, oct->pf_num,
		    (u64)octeon_read_csr64(oct, reg));

	/*0x29120*/
	reg = 0x29120;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x27300*/
	reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
	len += sprintf(
	    s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
	    oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));

	/*0x27200*/
	reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/*29130*/
	reg = CN23XX_SLI_PKT_CNT_INT;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x29140*/
	reg = CN23XX_SLI_PKT_TIME_INT;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x29160*/
	reg = 0x29160;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x29180*/
	reg = CN23XX_SLI_OQ_WMARK;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
		       reg, (u64)octeon_read_csr64(oct, reg));

	/*0x291E0*/
	reg = CN23XX_SLI_PKT_IOQ_RING_RST;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x29210*/
	reg = CN23XX_SLI_GBL_CONTROL;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/*0x29220*/
	reg = 0x29220;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
		       reg, (u64)octeon_read_csr64(oct, reg));

	/*PF only*/
	if (pf_num == 0) {
		/*0x29260*/
		reg = CN23XX_SLI_OUT_BP_EN_W1S;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
			       reg, (u64)octeon_read_csr64(oct, reg));
	} else if (pf_num == 1) {
		/*0x29270*/
		reg = CN23XX_SLI_OUT_BP_EN2_W1S;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
			       reg, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
		len +=
		    sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			    reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10040*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10080*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10090*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_SIZE(i);
		len += sprintf(
		    s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
		    reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10050*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(
		    s + len,
		    "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
		    reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x10070*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x100a0*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x100b0*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/*0x100c0*/
	/* NOTE(review): this loop is NOT closed after the ERROR_INFO
	 * sprintf -- the input-queue dumps below run inside it and reuse
	 * 'i', so the outer loop executes only once (only queue 0's
	 * ERROR_INFO is printed) and the final brace-less 0x10040 loop
	 * emits a single line with a stale index.  Do not "fix" the
	 * braces blindly: fully un-nesting these loops would emit ~128
	 * extra lines and could overrun the buffer sized by
	 * lio_get_regs_len() (OCT_ETHTOOL_REGDUMP_LEN_23XX).  Confirm
	 * intent and resize the buffer before restructuring.
	 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
		len +=
		    sprintf(s + len,
			    "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			    reg, i, (u64)octeon_read_csr64(oct, reg));

		/*0x10000*/
		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
			reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
			len += sprintf(
			    s + len,
			    "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			    reg, i, (u64)octeon_read_csr64(oct, reg));
		}

		/*0x10010*/
		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
			reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
			len += sprintf(
			    s + len,
			    "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
			    i, (u64)octeon_read_csr64(oct, reg));
		}

		/*0x10020*/
		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
			reg = CN23XX_SLI_IQ_DOORBELL(i);
			len += sprintf(
			    s + len,
			    "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			    reg, i, (u64)octeon_read_csr64(oct, reg));
		}

		/*0x10030*/
		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
			reg = CN23XX_SLI_IQ_SIZE(i);
			len += sprintf(
			    s + len,
			    "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			    reg, i, (u64)octeon_read_csr64(oct, reg));
		}

		/*0x10040*/
		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
			reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}

/* Format the CN23xx VF per-ring CSR dump into s; returns bytes written. */
static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
{
	int len = 0;
	u32 reg;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg =
CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2204 len += sprintf(s + len, 2205 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2206 reg, i, (u64)octeon_read_csr64(oct, reg)); 2207 } 2208 2209 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2210 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i); 2211 len += sprintf(s + len, 2212 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", 2213 reg, i, (u64)octeon_read_csr64(oct, reg)); 2214 } 2215 2216 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2217 reg = CN23XX_VF_SLI_OQ_SIZE(i); 2218 len += sprintf(s + len, 2219 "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", 2220 reg, i, (u64)octeon_read_csr64(oct, reg)); 2221 } 2222 2223 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2224 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i); 2225 len += sprintf(s + len, 2226 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", 2227 reg, i, (u64)octeon_read_csr64(oct, reg)); 2228 } 2229 2230 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2231 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i); 2232 len += sprintf(s + len, 2233 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", 2234 reg, i, (u64)octeon_read_csr64(oct, reg)); 2235 } 2236 2237 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2238 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i); 2239 len += sprintf(s + len, 2240 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", 2241 reg, i, (u64)octeon_read_csr64(oct, reg)); 2242 } 2243 2244 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2245 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i); 2246 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", 2247 reg, i, (u64)octeon_read_csr64(oct, reg)); 2248 } 2249 2250 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2251 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET; 2252 len += sprintf(s + len, 2253 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", 2254 reg, i, (u64)octeon_read_csr64(oct, reg)); 2255 } 2256 2257 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2258 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET; 2259 
len += sprintf(s + len, 2260 "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n", 2261 reg, i, (u64)octeon_read_csr64(oct, reg)); 2262 } 2263 2264 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2265 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i); 2266 len += sprintf(s + len, 2267 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", 2268 reg, i, (u64)octeon_read_csr64(oct, reg)); 2269 } 2270 2271 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2272 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i); 2273 len += sprintf(s + len, 2274 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", 2275 reg, i, (u64)octeon_read_csr64(oct, reg)); 2276 } 2277 2278 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2279 reg = CN23XX_VF_SLI_IQ_DOORBELL(i); 2280 len += sprintf(s + len, 2281 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", 2282 reg, i, (u64)octeon_read_csr64(oct, reg)); 2283 } 2284 2285 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2286 reg = CN23XX_VF_SLI_IQ_SIZE(i); 2287 len += sprintf(s + len, 2288 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", 2289 reg, i, (u64)octeon_read_csr64(oct, reg)); 2290 } 2291 2292 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { 2293 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); 2294 len += sprintf(s + len, 2295 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", 2296 reg, i, (u64)octeon_read_csr64(oct, reg)); 2297 } 2298 2299 return len; 2300 } 2301 2302 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) 2303 { 2304 u32 reg; 2305 int i, len = 0; 2306 2307 /* PCI Window Registers */ 2308 2309 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); 2310 reg = CN6XXX_WIN_WR_ADDR_LO; 2311 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n", 2312 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg)); 2313 reg = CN6XXX_WIN_WR_ADDR_HI; 2314 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n", 2315 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg)); 2316 reg = CN6XXX_WIN_RD_ADDR_LO; 2317 len += sprintf(s + len, "[%02x] 
(WIN_RD_ADDR_LO): %08x\n", 2318 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg)); 2319 reg = CN6XXX_WIN_RD_ADDR_HI; 2320 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n", 2321 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg)); 2322 reg = CN6XXX_WIN_WR_DATA_LO; 2323 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n", 2324 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg)); 2325 reg = CN6XXX_WIN_WR_DATA_HI; 2326 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n", 2327 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg)); 2328 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n", 2329 CN6XXX_WIN_WR_MASK_REG, 2330 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG)); 2331 2332 /* PCI Interrupt Register */ 2333 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n", 2334 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct, 2335 CN6XXX_SLI_INT_ENB64_PORT0)); 2336 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n", 2337 CN6XXX_SLI_INT_ENB64_PORT1, 2338 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1)); 2339 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64, 2340 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64)); 2341 2342 /* PCI Output queue registers */ 2343 for (i = 0; i < oct->num_oqs; i++) { 2344 reg = CN6XXX_SLI_OQ_PKTS_SENT(i); 2345 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n", 2346 reg, i, octeon_read_csr(oct, reg)); 2347 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i); 2348 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n", 2349 reg, i, octeon_read_csr(oct, reg)); 2350 } 2351 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS; 2352 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n", 2353 reg, octeon_read_csr(oct, reg)); 2354 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME; 2355 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n", 2356 reg, octeon_read_csr(oct, reg)); 2357 2358 /* PCI Input queue registers */ 2359 for (i = 0; i <= 3; i++) { 2360 u32 reg; 2361 2362 reg = CN6XXX_SLI_IQ_DOORBELL(i); 2363 len += sprintf(s + len, 
"\n[%x] (INSTR_DOORBELL_%d): %08x\n", 2364 reg, i, octeon_read_csr(oct, reg)); 2365 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i); 2366 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n", 2367 reg, i, octeon_read_csr(oct, reg)); 2368 } 2369 2370 /* PCI DMA registers */ 2371 2372 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n", 2373 CN6XXX_DMA_CNT(0), 2374 octeon_read_csr(oct, CN6XXX_DMA_CNT(0))); 2375 reg = CN6XXX_DMA_PKT_INT_LEVEL(0); 2376 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n", 2377 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg)); 2378 reg = CN6XXX_DMA_TIME_INT_LEVEL(0); 2379 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n", 2380 CN6XXX_DMA_TIME_INT_LEVEL(0), 2381 octeon_read_csr(oct, reg)); 2382 2383 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n", 2384 CN6XXX_DMA_CNT(1), 2385 octeon_read_csr(oct, CN6XXX_DMA_CNT(1))); 2386 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 2387 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n", 2388 CN6XXX_DMA_PKT_INT_LEVEL(1), 2389 octeon_read_csr(oct, reg)); 2390 reg = CN6XXX_DMA_PKT_INT_LEVEL(1); 2391 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n", 2392 CN6XXX_DMA_TIME_INT_LEVEL(1), 2393 octeon_read_csr(oct, reg)); 2394 2395 /* PCI Index registers */ 2396 2397 len += sprintf(s + len, "\n"); 2398 2399 for (i = 0; i < 16; i++) { 2400 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port)); 2401 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n", 2402 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg); 2403 } 2404 2405 return len; 2406 } 2407 2408 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct) 2409 { 2410 u32 val; 2411 int i, len = 0; 2412 2413 /* PCI CONFIG Registers */ 2414 2415 len += sprintf(s + len, 2416 "\n\t Octeon Config space Registers\n\n"); 2417 2418 for (i = 0; i <= 13; i++) { 2419 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 2420 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 2421 (i * 4), i, val); 2422 } 2423 2424 for (i = 30; i <= 34; i++) { 
2425 pci_read_config_dword(oct->pci_dev, (i * 4), &val); 2426 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", 2427 (i * 4), i, val); 2428 } 2429 2430 return len; 2431 } 2432 2433 /* Return register dump user app. */ 2434 static void lio_get_regs(struct net_device *dev, 2435 struct ethtool_regs *regs, void *regbuf) 2436 { 2437 struct lio *lio = GET_LIO(dev); 2438 int len = 0; 2439 struct octeon_device *oct = lio->oct_dev; 2440 2441 regs->version = OCT_ETHTOOL_REGSVER; 2442 2443 switch (oct->chip_id) { 2444 case OCTEON_CN23XX_PF_VID: 2445 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX); 2446 len += cn23xx_read_csr_reg(regbuf + len, oct); 2447 break; 2448 case OCTEON_CN23XX_VF_VID: 2449 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF); 2450 len += cn23xx_vf_read_csr_reg(regbuf + len, oct); 2451 break; 2452 case OCTEON_CN68XX: 2453 case OCTEON_CN66XX: 2454 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); 2455 len += cn6xxx_read_csr_reg(regbuf + len, oct); 2456 len += cn6xxx_read_config_reg(regbuf + len, oct); 2457 break; 2458 default: 2459 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n", 2460 __func__, oct->chip_id); 2461 } 2462 } 2463 2464 static u32 lio_get_priv_flags(struct net_device *netdev) 2465 { 2466 struct lio *lio = GET_LIO(netdev); 2467 2468 return lio->oct_dev->priv_flags; 2469 } 2470 2471 static int lio_set_priv_flags(struct net_device *netdev, u32 flags) 2472 { 2473 struct lio *lio = GET_LIO(netdev); 2474 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); 2475 2476 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, 2477 intr_by_tx_bytes); 2478 return 0; 2479 } 2480 2481 static const struct ethtool_ops lio_ethtool_ops = { 2482 .get_link_ksettings = lio_get_link_ksettings, 2483 .get_link = ethtool_op_get_link, 2484 .get_drvinfo = lio_get_drvinfo, 2485 .get_ringparam = lio_ethtool_get_ringparam, 2486 .get_channels = lio_ethtool_get_channels, 2487 .set_phys_id = lio_set_phys_id, 2488 .get_eeprom_len = lio_get_eeprom_len, 
2489 .get_eeprom = lio_get_eeprom, 2490 .get_strings = lio_get_strings, 2491 .get_ethtool_stats = lio_get_ethtool_stats, 2492 .get_pauseparam = lio_get_pauseparam, 2493 .set_pauseparam = lio_set_pauseparam, 2494 .get_regs_len = lio_get_regs_len, 2495 .get_regs = lio_get_regs, 2496 .get_msglevel = lio_get_msglevel, 2497 .set_msglevel = lio_set_msglevel, 2498 .get_sset_count = lio_get_sset_count, 2499 .get_coalesce = lio_get_intr_coalesce, 2500 .set_coalesce = lio_set_intr_coalesce, 2501 .get_priv_flags = lio_get_priv_flags, 2502 .set_priv_flags = lio_set_priv_flags, 2503 .get_ts_info = lio_get_ts_info, 2504 }; 2505 2506 static const struct ethtool_ops lio_vf_ethtool_ops = { 2507 .get_link_ksettings = lio_get_link_ksettings, 2508 .get_link = ethtool_op_get_link, 2509 .get_drvinfo = lio_get_vf_drvinfo, 2510 .get_ringparam = lio_ethtool_get_ringparam, 2511 .get_channels = lio_ethtool_get_channels, 2512 .get_strings = lio_vf_get_strings, 2513 .get_ethtool_stats = lio_vf_get_ethtool_stats, 2514 .get_regs_len = lio_get_regs_len, 2515 .get_regs = lio_get_regs, 2516 .get_msglevel = lio_get_msglevel, 2517 .set_msglevel = lio_set_msglevel, 2518 .get_sset_count = lio_vf_get_sset_count, 2519 .get_coalesce = lio_get_intr_coalesce, 2520 .set_coalesce = lio_set_intr_coalesce, 2521 .get_priv_flags = lio_get_priv_flags, 2522 .set_priv_flags = lio_set_priv_flags, 2523 .get_ts_info = lio_get_ts_info, 2524 }; 2525 2526 void liquidio_set_ethtool_ops(struct net_device *netdev) 2527 { 2528 struct lio *lio = GET_LIO(netdev); 2529 struct octeon_device *oct = lio->oct_dev; 2530 2531 if (OCTEON_CN23XX_VF(oct)) 2532 netdev->ethtool_ops = &lio_vf_ethtool_ops; 2533 else 2534 netdev->ethtool_ops = &lio_ethtool_ops; 2535 } 2536