/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
static int octnet_get_link_stats(struct net_device *netdev);

struct oct_intrmod_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
	int status;
};

struct oct_intrmod_resp {
	u64 rh;
	struct oct_intrmod_cfg intrmod;
	u64 status;
};

struct oct_mdio_cmd_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct oct_mdio_cmd_resp {
	u64 rh;
	struct oct_mdio_cmd resp;
	u64 status;
};

#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))

/* Octeon's interface mode of operation */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};

#define OCT_ETHTOOL_REGDUMP_LEN		4096
#define OCT_ETHTOOL_REGDUMP_LEN_23XX	(4096 * 11)
#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF	(4096 * 2)
#define OCT_ETHTOOL_REGSVER		1

/* statistics of PF */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",

	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_pki",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",

	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collision",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_vxlan",
	"rx_vxlan_err",

"rx_lro_pkts", 142 "rx_lro_bytes", 143 "rx_total_lro", 144 145 "rx_lro_aborts", 146 "rx_lro_aborts_port", 147 "rx_lro_aborts_seq", 148 "rx_lro_aborts_tsval", 149 "rx_lro_aborts_timer", 150 "rx_fwd_rate", 151 152 "mac_rx_total_rcvd", 153 "mac_rx_bytes", 154 "mac_rx_total_bcst", 155 "mac_rx_total_mcst", 156 "mac_rx_runts", 157 "mac_rx_ctl_packets", 158 "mac_rx_fifo_err", 159 "mac_rx_dma_drop", 160 "mac_rx_fcs_err", 161 162 "link_state_changes", 163 }; 164 165 /* statistics of VF */ 166 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { 167 "rx_packets", 168 "tx_packets", 169 "rx_bytes", 170 "tx_bytes", 171 "rx_errors", 172 "tx_errors", 173 "rx_dropped", 174 "tx_dropped", 175 "link_state_changes", 176 }; 177 178 /* statistics of host tx queue */ 179 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { 180 "packets", 181 "bytes", 182 "dropped", 183 "iq_busy", 184 "sgentry_sent", 185 186 "fw_instr_posted", 187 "fw_instr_processed", 188 "fw_instr_dropped", 189 "fw_bytes_sent", 190 191 "tso", 192 "vxlan", 193 "txq_restart", 194 }; 195 196 /* statistics of host rx queue */ 197 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { 198 "packets", 199 "bytes", 200 "dropped", 201 "dropped_nomem", 202 "dropped_toomany", 203 "fw_dropped", 204 "fw_pkts_received", 205 "fw_bytes_received", 206 "fw_dropped_nodispatch", 207 208 "vxlan", 209 "buffer_alloc_failure", 210 }; 211 212 /* LiquidIO driver private flags */ 213 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { 214 }; 215 216 #define OCTNIC_NCMD_AUTONEG_ON 0x1 217 #define OCTNIC_NCMD_PHY_ON 0x2 218 219 static int lio_get_link_ksettings(struct net_device *netdev, 220 struct ethtool_link_ksettings *ecmd) 221 { 222 struct lio *lio = GET_LIO(netdev); 223 struct octeon_device *oct = lio->oct_dev; 224 struct oct_link_info *linfo; 225 u32 supported = 0, advertising = 0; 226 227 linfo = &lio->linfo; 228 229 switch (linfo->link.s.phy_type) { 230 case LIO_PHY_PORT_TP: 231 ecmd->base.port = PORT_TP; 232 supported = (SUPPORTED_10000baseT_Full | 233 SUPPORTED_TP | SUPPORTED_Pause); 234 advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); 235 ecmd->base.autoneg = AUTONEG_DISABLE; 236 break; 237 238 case LIO_PHY_PORT_FIBRE: 239 ecmd->base.port = PORT_FIBRE; 240 241 if (linfo->link.s.speed == SPEED_10000) { 242 supported = SUPPORTED_10000baseT_Full; 243 advertising = ADVERTISED_10000baseT_Full; 244 } 245 246 supported |= SUPPORTED_FIBRE | SUPPORTED_Pause; 247 advertising |= ADVERTISED_Pause; 248 ecmd->base.autoneg = AUTONEG_DISABLE; 249 break; 250 } 251 252 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || 253 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || 254 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || 255 linfo->link.s.if_mode == INTERFACE_MODE_XFI) { 256 ethtool_convert_legacy_u32_to_link_mode( 257 ecmd->link_modes.supported, supported); 258 ethtool_convert_legacy_u32_to_link_mode( 259 ecmd->link_modes.advertising, advertising); 260 } else { 261 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", 262 linfo->link.s.if_mode); 263 } 264 265 if (linfo->link.s.link_up) { 266 ecmd->base.speed = linfo->link.s.speed; 267 ecmd->base.duplex = linfo->link.s.duplex; 268 } else { 269 ecmd->base.speed = SPEED_UNKNOWN; 270 ecmd->base.duplex = DUPLEX_UNKNOWN; 271 } 272 273 return 0; 274 } 275 276 static void 277 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 278 { 279 struct lio *lio; 280 struct octeon_device *oct; 281 282 lio = GET_LIO(netdev); 283 oct = lio->oct_dev; 284 
	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio");
	strcpy(drvinfo->version, LIQUIDIO_VERSION);
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}

static void
lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct octeon_device *oct;
	struct lio *lio;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio_vf");
	strcpy(drvinfo->version, LIQUIDIO_VERSION);
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}

static int
lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
	nctrl.ncmd.s.param1 = num_queues;
	nctrl.ncmd.s.param2 = num_queues;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
			ret);
		return -1;
	}

	return 0;
}

static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
	u32 combined_count = 0, max_combined = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct)) {
		max_combined = lio->linfo.num_txpciq;
		combined_count = oct->num_iqs;
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
		combined_count = oct->num_iqs;
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->max_combined = max_combined;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
	channel->combined_count = combined_count;
}

static int
lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
{
	struct msix_entry *msix_entries;
	int num_msix_irqs = 0;
	int i;

	if (!oct->msix_on)
		return 0;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	if (oct->msix_on) {
		if (OCTEON_CN23XX_PF(oct))
			num_msix_irqs = oct->num_msix_irqs - 1;
		else if (OCTEON_CN23XX_VF(oct))
			num_msix_irqs = oct->num_msix_irqs;

		msix_entries = (struct msix_entry *)oct->msix_entries;
		for (i = 0; i < num_msix_irqs; i++) {
			if (oct->ioq_vector[i].vector) {
				/* clear the affinity_cpumask */
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
				oct->ioq_vector[i].vector = 0;
			}
		}

		/* non-iov vector's argument is oct struct */
		if (OCTEON_CN23XX_PF(oct))
			free_irq(msix_entries[i].vector, oct);

		pci_disable_msix(oct->pci_dev);
		kfree(oct->msix_entries);
		oct->msix_entries = NULL;
	}

	kfree(oct->irq_name_storage);
	oct->irq_name_storage = NULL;
	if (octeon_setup_interrupt(oct, num_ioqs)) {
		dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
		return 1;
	}

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return 0;
}

static int
lio_ethtool_set_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	u32 combined_count, max_combined;
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
		dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
		return -EINVAL;
	}

	if (!channel->combined_count || channel->other_count ||
	    channel->rx_count || channel->tx_count)
		return -EINVAL;

	combined_count = channel->combined_count;

	if (OCTEON_CN23XX_PF(oct)) {
		max_combined = channel->max_combined;
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
	} else {
		return -EINVAL;
	}

	if (combined_count > max_combined || combined_count < 1)
		return -EINVAL;

	if (combined_count == oct->num_iqs)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(dev)) {
		dev->netdev_ops->ndo_stop(dev);
		stopped = 1;
	}

	if (lio_reset_queues(dev, combined_count))
		return -EINVAL;

	lio_irq_reallocate_irqs(oct, combined_count);
	if (stopped)
		dev->netdev_ops->ndo_open(dev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;
}

static int lio_get_eeprom_len(struct net_device *netdev)
{
	u8 buf[192];
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		      board_info->name, board_info->serial_number,
		      board_info->major, board_info->minor);

	return len;
}

static int
lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;

	if (eeprom->offset)
		return -EINVAL;

	eeprom->magic = oct_dev->pci_dev->vendor;
	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	sprintf((char *)bytes,
		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
		board_info->name, board_info->serial_number,
		board_info->major, board_info->minor);

	return 0;
}

static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
	nctrl.ncmd.s.param1 = addr;
	nctrl.ncmd.s.param2 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
		return -EINVAL;
	}

	return 0;
}

static int octnet_id_active(struct net_device *netdev, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
	nctrl.ncmd.s.param1 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
		return -EINVAL;
	}

	return 0;
}

/* Callback for when mdio command response arrives */
static void octnet_mdio_resp_callback(struct octeon_device *oct,
				      u32 status,
				      void *buf)
{
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;

	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		WRITE_ONCE(mdio_cmd_ctx->cond, -1);
	} else {
		WRITE_ONCE(mdio_cmd_ctx->cond, 1);
	}
	wake_up_interruptible(&mdio_cmd_ctx->wc);
}

/* This routine provides PHY access routines for
 * MDIO clause 45.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	WRITE_ONCE(mdio_cmd_ctx->cond, 0);
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}

static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &value);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);

			/* returns 0 since updates are asynchronous */
			return 0;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);

			return 0;
		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
		rx_pending = oct->droq[0]->max_count;
		tx_pending = oct->instr_queue[0]->max_count;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
	ering->rx_pending = rx_pending;
	ering->rx_max_pending = rx_max_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
}

static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;
	int i, update = 0;

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	if (lio_wait_for_instr_fetch(oct))
		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
		return -1;
	}

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_io_queues(oct);
	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (num_qs != oct->num_iqs) {
		netif_set_real_num_rx_queues(netdev, num_qs);
		netif_set_real_num_tx_queues(netdev, num_qs);
		update = 1;
	}

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_delete_droq(oct, i);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_delete_instr_queue(oct, i);
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
		return -1;
	}

	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
		dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
		return -1;
	}

	/* Enable the input and output queues for this Octeon device */
	if (oct->fn_list.enable_io_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues");
		return -1;
	}

	if (update && lio_send_queue_count_update(netdev, num_qs))
		return -1;

	return 0;
}

static int lio_ethtool_set_ringparam(struct net_device *netdev,
				     struct ethtool_ringparam *ering)
{
	u32 rx_count, tx_count, rx_count_old, tx_count_old;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
		return -EINVAL;

	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
		return -EINVAL;

	rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
			   CN23XX_MAX_OQ_DESCRIPTORS);
	tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
			   CN23XX_MAX_IQ_DESCRIPTORS);

	rx_count_old = oct->droq[0]->max_count;
	tx_count_old = oct->instr_queue[0]->max_count;

	if (rx_count == rx_count_old && tx_count == tx_count_old)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(netdev)) {
		netdev->netdev_ops->ndo_stop(netdev);
		stopped = 1;
	}

	/* Change RX/TX DESCS count */
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count);

	if (lio_reset_queues(netdev, lio->linfo.num_txpciq))
		goto err_lio_reset_queues;

	if (stopped)
		netdev->netdev_ops->ndo_open(netdev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;

err_lio_reset_queues:
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count_old);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count_old);
	return -EINVAL;
}

static u32 lio_get_msglevel(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->msg_enable;
}

static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
		if (msglvl & NETIF_MSG_HW)
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);
		else
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_DISABLE, 0);
	}

	lio->msg_enable = msglvl;
}

static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	lio->msg_enable = msglvl;
}

static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers. Just report pause frame support.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	pause->autoneg = 0;

	pause->tx_pause = oct->tx_pause;
	pause->rx_pause = oct->rx_pause;
}

static int
lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct oct_link_info *linfo = &lio->linfo;

	int ret = 0;

	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
		return -EINVAL;

	if (linfo->link.s.duplex == 0) {
		/* no flow control for half duplex */
		if (pause->rx_pause || pause->tx_pause)
			return -EINVAL;
	}

	/* do not support autoneg of link flow control */
	if (pause->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	if (pause->rx_pause) {
		/* enable rx pause */
		nctrl.ncmd.s.param1 = 1;
	} else {
		/* disable rx pause */
		nctrl.ncmd.s.param1 = 0;
	}

	if (pause->tx_pause) {
		/* enable tx pause */
		nctrl.ncmd.s.param2 = 1;
	} else {
		/* disable tx pause */
		nctrl.ncmd.s.param2 = 0;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
		return -EINVAL;
	}

	oct->rx_pause = pause->rx_pause;
	oct->tx_pause = pause->tx_pause;

	return 0;
}

static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors +
			       oct_dev->link_stats.fromwire.fcs_err +
			       oct_dev->link_stats.fromwire.jabber_err +
			       oct_dev->link_stats.fromwire.l2_err +
			       oct_dev->link_stats.fromwire.frame_err);
	data[i++] = CVM_CAST64(netstats->tx_errors);
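	/* Values are matched to oct_stats_strings[] purely by position,
	 * so the order of the data[i++] assignments below must stay in
	 * sync with that string table.
	 */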
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped +
			       oct_dev->link_stats.fromwire.fifo_err +
			       oct_dev->link_stats.fromwire.dmac_drop +
			       oct_dev->link_stats.fromwire.red_drops +
			       oct_dev->link_stats.fromwire.fw_err_pko +
			       oct_dev->link_stats.fromwire.fw_err_link +
			       oct_dev->link_stats.fromwire.fw_err_drop);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped +
			       oct_dev->link_stats.fromhost.max_collision_fail +
			       oct_dev->link_stats.fromhost.max_deferral_fail +
			       oct_dev->link_stats.fromhost.total_collisions +
			       oct_dev->link_stats.fromhost.fw_err_pko +
			       oct_dev->link_stats.fromhost.fw_err_link +
			       oct_dev->link_stats.fromhost.fw_err_drop +
			       oct_dev->link_stats.fromhost.fw_err_pki);

	/* firmware tx stats */
	/* per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 * fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/* per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* mac tx statistics */
	/* CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/* CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/* CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/* CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/* CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/* CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/* per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO */
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/* per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/* per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_timer
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/* intrmod: packet forward rate */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/* CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/* CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/* CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
			continue;
		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions that could not be processed */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/* tso request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restart */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
			continue;

		/* packets sent to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct net_device_stats *netstats = &netdev->stats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats(netdev);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions that could not be processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.bytes_sent);
		/* tso request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restart */
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets sent to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] = CVM_CAST64(
			oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] = CVM_CAST64(
			oct_dev->droq[j]->stats.rx_bytes_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	int i;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
			sprintf(data, "%s", oct_priv_flags_strings[i]);
			data += ETH_GSTRING_LEN;
		}
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		break;
	}
}

static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
			       u8 *data)
{
	int num_iq_stats, num_oq_stats, i, j;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_vf_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

static int lio_get_priv_flags_ss_count(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return ARRAY_SIZE(oct_priv_flags_strings);
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		return -EOPNOTSUPP;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EOPNOTSUPP;
	}
}

static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_vf_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

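/* The intrmod get/set helpers below follow the driver's usual
 * soft-command pattern: allocate a command with room for response and
 * context, post it to the first control input queue, then sleep on the
 * context wait queue until octnet_intrmod_callback() fires or the
 * request times out.
 */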
/* Callback function for intrmod */
static void octnet_intrmod_callback(struct octeon_device *oct_dev,
				    u32 status,
				    void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_intrmod_context *ctx;

	ctx = (struct oct_intrmod_context *)sc->ctxptr;

	ctx->status = status;

	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/* get interrupt moderation parameters */
static int octnet_get_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_context *ctx;
	struct oct_intrmod_resp *resp;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_intrmod_resp),
					  sizeof(struct oct_intrmod_context));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_intrmod_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_intrmod_resp));

	ctx = (struct oct_intrmod_context *)sc->ctxptr;
	memset(ctx, 0, sizeof(struct oct_intrmod_context));
	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct_dev);
	init_waitqueue_head(&ctx->wc);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
		dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
		goto intrmod_info_wait_intr;
	}

	retval = ctx->status || resp->status;
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"Get interrupt moderation parameters failed\n");
		goto intrmod_info_wait_fail;
	}

	octeon_swap_8B_data((u64 *)&resp->intrmod,
			    (sizeof(struct oct_intrmod_cfg)) / 8);
	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
	octeon_free_soft_command(oct_dev, sc);

	return 0;

intrmod_info_wait_fail:

	octeon_free_soft_command(oct_dev, sc);

intrmod_info_wait_intr:

	return -ENODEV;
}

/* Configure interrupt moderation parameters */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_context *ctx;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_context));

	if (!sc)
		return -ENOMEM;

	ctx = (struct oct_intrmod_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct_dev);
	init_waitqueue_head(&ctx->wc);

	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
		retval = ctx->status;
		if (retval)
			dev_err(&oct_dev->pci_dev->dev,
				"intrmod config failed. Status: %llx\n",
				CVM_CAST64(retval));
		else
			dev_info(&oct_dev->pci_dev->dev,
				 "Rx-Adaptive Interrupt moderation %s\n",
				 (intr_cfg->rx_enable) ?
				 "enabled" : "disabled");

		octeon_free_soft_command(oct_dev, sc);

		return ((retval) ? -ENODEV : 0);
	}

	dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");

	return -EINTR;
}

static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
		(struct oct_nic_stats_resp *)sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl =
		(struct oct_nic_stats_ctrl *)sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;

	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;
		rstats->red_drops = rsp_rstats->red_drops;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of packet aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
		(struct oct_nic_stats_resp *)sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl =
		(struct oct_nic_stats_ctrl *)sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;

	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;
		rstats->red_drops = rsp_rstats->red_drops;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of a packet was aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		resp->status = -1;
	}
	complete(&ctrl->complete);
}
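
/* Refresh the cached link statistics by sending OPCODE_NIC_PORT_STATS to
 * the firmware and waiting, bounded by a timeout, for
 * octnet_nic_stats_callback() to fill them in.
 */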
static int octnet_get_link_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	struct octeon_soft_command *sc;
	struct oct_nic_stats_ctrl *ctrl;
	struct oct_nic_stats_resp *resp;

	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  sizeof(struct octnic_ctrl_pkt));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
	ctrl->netdev = netdev;
	init_completion(&ctrl->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	sc->callback = octnet_nic_stats_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500;	/* in milliseconds */

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));

	if (resp->status != 1) {
		octeon_free_soft_command(oct_dev, sc);

		return -EINVAL;
	}

	octeon_free_soft_command(oct_dev, sc);

	return 0;
}
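
/* ethtool .get_coalesce handler: report the interrupt coalescing
 * parameters currently in effect for this device.
 */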
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg intrmod_cfg;

	if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
		return -ENODEV;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID: {
		if (!intrmod_cfg.rx_enable) {
			intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
			intr_coal->rx_max_coalesced_frames =
				oct->rx_max_coalesced_frames;
		}
		if (!intrmod_cfg.tx_enable)
			intr_coal->tx_max_coalesced_frames =
				oct->tx_max_coalesced_frames;
		break;
	}
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intrmod_cfg.rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown chip\n");
		return -EINVAL;
	}
	if (intrmod_cfg.rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg.rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg.check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg.maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg.minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg.rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg.rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg.rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg.rx_mincnt_trigger;
	}
	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
	    (intrmod_cfg.tx_enable)) {
		intr_coal->use_adaptive_tx_coalesce =
			intrmod_cfg.tx_enable;
		intr_coal->tx_max_coalesced_frames_high =
			intrmod_cfg.tx_maxcnt_trigger;
		intr_coal->tx_max_coalesced_frames_low =
			intrmod_cfg.tx_mincnt_trigger;
	}
	return 0;
}
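
/* Adaptive moderation is implemented by the firmware: the host only
 * supplies the sampling interval, packet-rate thresholds and count/timer
 * trigger bounds below; the firmware then scales the interrupt rate with
 * the observed packet-forward rate (see the fwd_rate statistic).
 */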
/* Enable/disable adaptive interrupt moderation */
static int oct_cfg_adaptive_intr(struct lio *lio,
				 struct oct_intrmod_cfg *intrmod_cfg,
				 struct ethtool_coalesce *intr_coal)
{
	int ret = 0;

	if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
		intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
		intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
		intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
	}
	if (intrmod_cfg->rx_enable) {
		intrmod_cfg->rx_maxcnt_trigger =
			intr_coal->rx_max_coalesced_frames_high;
		intrmod_cfg->rx_maxtmr_trigger =
			intr_coal->rx_coalesce_usecs_high;
		intrmod_cfg->rx_mintmr_trigger =
			intr_coal->rx_coalesce_usecs_low;
		intrmod_cfg->rx_mincnt_trigger =
			intr_coal->rx_max_coalesced_frames_low;
	}
	if (intrmod_cfg->tx_enable) {
		intrmod_cfg->tx_maxcnt_trigger =
			intr_coal->tx_max_coalesced_frames_high;
		intrmod_cfg->tx_mincnt_trigger =
			intr_coal->tx_max_coalesced_frames_low;
	}

	ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);

	return ret;
}
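
/* Program the RX packet-count interrupt threshold: one global register on
 * CN6XXX, a per-output-queue register on CN23XX.
 */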
static int
oct_cfg_rx_intrcnt(struct lio *lio,
		   struct oct_intrmod_cfg *intrmod,
		   struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 rx_max_coalesced_frames;

	/* Config Cnt based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
				 rx_max_coalesced_frames);
		CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
		break;
	}
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = intrmod->rx_frames;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			/* PF rings start at pf_srn; don't modify the loop
			 * counter itself, or the offset compounds on every
			 * iteration.
			 */
			int ring = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(ring),
			    (octeon_read_csr64(
				 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(ring)) &
			     (0x3fffff00000000UL)) |
			    (rx_max_coalesced_frames - 1));
			/* consider setting resend bit */
		}
		intrmod->rx_frames = rx_max_coalesced_frames;
		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
		break;
	}
	case OCTEON_CN23XX_VF_VID: {
		int q_no;

		if (!intr_coal->rx_max_coalesced_frames)
			rx_max_coalesced_frames = intrmod->rx_frames;
		else
			rx_max_coalesced_frames =
				intr_coal->rx_max_coalesced_frames;
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
			    (octeon_read_csr64(
				 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
			     (0x3fffff00000000UL)) |
			    (rx_max_coalesced_frames - 1));
			/* consider writing to resend bit here */
		}
		intrmod->rx_frames = rx_max_coalesced_frames;
		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
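
/* Program the RX interrupt hold-off time; ethtool's microsecond value is
 * converted to hardware ticks for each chip family.
 */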
static int oct_cfg_rx_intrtime(struct lio *lio,
			       struct oct_intrmod_cfg *intrmod,
			       struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 time_threshold, rx_coalesce_usecs;

	/* Config Time based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

		time_threshold = lio_cn6xxx_get_oq_ticks(oct,
							 rx_coalesce_usecs);
		octeon_write_csr(oct,
				 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
				 time_threshold);

		CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
		break;
	}
	case OCTEON_CN23XX_PF_VID: {
		u64 time_threshold;
		int q_no;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = intrmod->rx_usecs;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
		time_threshold =
			cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			/* Same pf_srn ring offset as in oct_cfg_rx_intrcnt() */
			int ring = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(oct,
					   CN23XX_SLI_OQ_PKT_INT_LEVELS(ring),
					   (intrmod->rx_frames |
					    ((u64)time_threshold << 32)));
			/* consider writing to resend bit here */
		}
		intrmod->rx_usecs = rx_coalesce_usecs;
		oct->rx_coalesce_usecs = rx_coalesce_usecs;
		break;
	}
	case OCTEON_CN23XX_VF_VID: {
		u64 time_threshold;
		int q_no;

		if (!intr_coal->rx_coalesce_usecs)
			rx_coalesce_usecs = intrmod->rx_usecs;
		else
			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;

		time_threshold =
			cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
			    (intrmod->rx_frames |
			     ((u64)time_threshold << 32)));
			/* consider setting resend bit */
		}
		intrmod->rx_usecs = rx_coalesce_usecs;
		oct->rx_coalesce_usecs = rx_coalesce_usecs;
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}
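
/* Program the TX completion-count watermark into each instruction queue's
 * IN_DONE count register.  This is a no-op on CN6XXX, where the doorbell
 * fill threshold is handled in lio_set_intr_coalesce() instead.
 */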
static int
oct_cfg_tx_intrcnt(struct lio *lio,
		   struct oct_intrmod_cfg *intrmod,
		   struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 iq_intr_pkt;
	void __iomem *inst_cnt_reg;
	u64 val;

	/* Config Cnt based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	case OCTEON_CN23XX_VF_VID:
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		if (!intr_coal->tx_max_coalesced_frames)
			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		else
			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
			val = readq(inst_cnt_reg);
			/* Clear the wmark field and the count; we don't
			 * want to write the count back.
			 */
			val = (val & 0xFFFF000000000000ULL) |
			      ((u64)(iq_intr_pkt - 1)
			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
			writeq(val, inst_cnt_reg);
			/* consider setting resend bit */
		}
		intrmod->tx_frames = iq_intr_pkt;
		oct->tx_max_coalesced_frames = iq_intr_pkt;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

/* ethtool .set_coalesce handler */
static int lio_set_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	int ret;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_intrmod_cfg intrmod = {0};
	u32 j, q_no;
	int db_max, db_min;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		db_min = CN6XXX_DB_MIN;
		db_max = CN6XXX_DB_MAX;
		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
			for (j = 0; j < lio->linfo.num_txpciq; j++) {
				q_no = lio->linfo.txpciq[j].s.q_no;
				oct->instr_queue[q_no]->fill_threshold =
					intr_coal->tx_max_coalesced_frames;
			}
		} else {
			dev_err(&oct->pci_dev->dev,
				"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
				intr_coal->tx_max_coalesced_frames,
				db_min, db_max);
			return -EINVAL;
		}
		break;
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		break;
	default:
		return -EINVAL;
	}

	intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
	intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
	intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));

	ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
	if (ret)
		goto ret_intrmod;

	if (!intr_coal->use_adaptive_rx_coalesce) {
		ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;

		ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;
	} else {
		oct->rx_coalesce_usecs =
			CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
		oct->rx_max_coalesced_frames =
			CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	}

	if (!intr_coal->use_adaptive_tx_coalesce) {
		ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
		if (ret)
			goto ret_intrmod;
	} else {
		oct->tx_max_coalesced_frames =
			CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
	}

	return 0;
ret_intrmod:
	return ret;
}
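
/* ethtool .get_ts_info handler: report timestamping capabilities and the
 * PHC index when a PTP clock has been registered for this interface.
 */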
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	info->so_timestamping =
#ifdef PTP_HARDWARE_TIMESTAMPING
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
#endif
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;

	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

#ifdef PTP_HARDWARE_TIMESTAMPING
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
#endif

	return 0;
}

/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
		return OCT_ETHTOOL_REGDUMP_LEN_23XX;
	case OCTEON_CN23XX_VF_VID:
		return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
	default:
		return OCT_ETHTOOL_REGDUMP_LEN;
	}
}
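
/* Dump the debug-relevant CN23XX PF CSRs as text into 's' and return the
 * number of bytes written; the buffer is sized by lio_get_regs_len().
 */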
static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	u8 pf_num = oct->pf_num;
	int len = 0;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	/* 0x29030 or 0x29040 */
	reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x27080 or 0x27090 */
	reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x27000 or 0x27010 */
	reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29120 */
	reg = 0x29120;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x27300 */
	reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
		       oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x27200 */
	reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
		       reg, oct->pcie_port, oct->pf_num,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29130 */
	reg = CN23XX_SLI_PKT_CNT_INT;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29140 */
	reg = CN23XX_SLI_PKT_TIME_INT;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29160 */
	reg = 0x29160;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29180 */
	reg = CN23XX_SLI_OQ_WMARK;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
		       reg, (u64)octeon_read_csr64(oct, reg));

	/* 0x291E0 */
	reg = CN23XX_SLI_PKT_IOQ_RING_RST;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29210 */
	reg = CN23XX_SLI_GBL_CONTROL;
	len += sprintf(s + len,
		       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
		       (u64)octeon_read_csr64(oct, reg));

	/* 0x29220 */
	reg = 0x29220;
	len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
		       reg, (u64)octeon_read_csr64(oct, reg));

	/* PF only */
	if (pf_num == 0) {
		/* 0x29260 */
		reg = CN23XX_SLI_OUT_BP_EN_W1S;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
			       reg, (u64)octeon_read_csr64(oct, reg));
	} else if (pf_num == 1) {
		/* 0x29270 */
		reg = CN23XX_SLI_OUT_BP_EN2_W1S;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
			       reg, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10040 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10080 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10090 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10050 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUTPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10070 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x100a0 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x100b0 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = CN23XX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x100c0.  Note: this loop must be closed before the input-queue
	 * loops below; nesting them reuses 'i' and corrupts both walks.
	 */
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10000 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10010 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10020 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10030 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* 0x10040.  The read and print both belong inside the loop body;
	 * the original braceless form only printed one stale value.
	 */
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}
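
/* Dump the per-ring CN23XX VF CSRs; a VF only sees the rings_per_vf rings
 * assigned to it.
 */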
static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
{
	int len = 0;
	u32 reg;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUTPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}
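
/* Dump the CN66XX/CN68XX window, interrupt, queue, DMA and BAR1 index
 * CSRs as text into 's' and return the number of bytes written.
 */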
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	int i, len = 0;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
	reg = CN6XXX_WIN_WR_ADDR_LO;
	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_LO;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_LO;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
		       CN6XXX_WIN_WR_MASK_REG,
		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));

	/* PCI Interrupt Register */
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT0,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT0));
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT1,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));

	/* PCI Output queue registers */
	for (i = 0; i < oct->num_oqs; i++) {
		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}
	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
		       reg, octeon_read_csr(oct, reg));
	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
		       reg, octeon_read_csr(oct, reg));

	/* PCI Input queue registers */
	for (i = 0; i <= 3; i++) {
		reg = CN6XXX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}

	/* PCI DMA registers */

	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
		       CN6XXX_DMA_CNT(0),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(0),
		       octeon_read_csr(oct, reg));

	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
		       CN6XXX_DMA_CNT(1),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));
	/* Read the TIME threshold here, not the PKT threshold again */
	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));

	/* PCI Index registers */

	len += sprintf(s + len, "\n");

	for (i = 0; i < 16; i++) {
		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
	}

	return len;
}

static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
{
	u32 val;
	int i, len = 0;

	/* PCI CONFIG Registers */

	len += sprintf(s + len,
		       "\n\t Octeon Config space Registers\n\n");

	for (i = 0; i <= 13; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	for (i = 30; i <= 34; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	return len;
}
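
/* The dump helpers above emit plain text; lio_get_regs() zeroes regbuf
 * first so the unused tail of the buffer reads as empty.
 */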
/* Return the register dump to the user app. */
static void lio_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *regbuf)
{
	struct lio *lio = GET_LIO(dev);
	int len = 0;
	struct octeon_device *oct = lio->oct_dev;

	regs->version = OCT_ETHTOOL_REGSVER;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
		len += cn23xx_read_csr_reg(regbuf + len, oct);
		break;
	case OCTEON_CN23XX_VF_VID:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
		len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
		len += cn6xxx_read_csr_reg(regbuf + len, oct);
		len += cn6xxx_read_config_reg(regbuf + len, oct);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
			__func__, oct->chip_id);
	}
}

static u32 lio_get_priv_flags(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->oct_dev->priv_flags;
}

static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct lio *lio = GET_LIO(netdev);
	bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));

	lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
			  intr_by_tx_bytes);
	return 0;
}

static const struct ethtool_ops lio_ethtool_ops = {
	.get_link_ksettings = lio_get_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = lio_get_drvinfo,
	.get_ringparam = lio_ethtool_get_ringparam,
	.set_ringparam = lio_ethtool_set_ringparam,
	.get_channels = lio_ethtool_get_channels,
	.set_channels = lio_ethtool_set_channels,
	.set_phys_id = lio_set_phys_id,
	.get_eeprom_len = lio_get_eeprom_len,
	.get_eeprom = lio_get_eeprom,
	.get_strings = lio_get_strings,
	.get_ethtool_stats = lio_get_ethtool_stats,
	.get_pauseparam = lio_get_pauseparam,
	.set_pauseparam = lio_set_pauseparam,
	.get_regs_len = lio_get_regs_len,
	.get_regs = lio_get_regs,
	.get_msglevel = lio_get_msglevel,
	.set_msglevel = lio_set_msglevel,
	.get_sset_count = lio_get_sset_count,
	.get_coalesce = lio_get_intr_coalesce,
	.set_coalesce = lio_set_intr_coalesce,
	.get_priv_flags = lio_get_priv_flags,
	.set_priv_flags = lio_set_priv_flags,
	.get_ts_info = lio_get_ts_info,
};

static const struct ethtool_ops lio_vf_ethtool_ops = {
	.get_link_ksettings = lio_get_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = lio_get_vf_drvinfo,
	.get_ringparam = lio_ethtool_get_ringparam,
	.set_ringparam = lio_ethtool_set_ringparam,
	.get_channels = lio_ethtool_get_channels,
	.set_channels = lio_ethtool_set_channels,
	.get_strings = lio_vf_get_strings,
	.get_ethtool_stats = lio_vf_get_ethtool_stats,
	.get_regs_len = lio_get_regs_len,
	.get_regs = lio_get_regs,
	.get_msglevel = lio_get_msglevel,
	.set_msglevel = lio_vf_set_msglevel,
	.get_sset_count = lio_vf_get_sset_count,
	.get_coalesce = lio_get_intr_coalesce,
	.set_coalesce = lio_set_intr_coalesce,
	.get_priv_flags = lio_get_priv_flags,
	.set_priv_flags = lio_set_priv_flags,
	.get_ts_info = lio_get_ts_info,
};

void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if (OCTEON_CN23XX_VF(oct))
		netdev->ethtool_ops = &lio_vf_ethtool_ops;
	else
		netdev->ethtool_ops = &lio_ethtool_ops;
}