/*******************************************************************************
  STMMAC Ethtool support

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/net_tstamp.h>
#include <asm/io.h>

#include "stmmac.h"
#include "dwmac_dma.h"

#define REG_SPACE_SIZE	0x1054
#define MAC100_ETHTOOL_NAME	"st_mac100"
#define GMAC_ETHTOOL_NAME	"st_gmac"

struct stmmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define STMMAC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
	offsetof(struct stmmac_priv, xstats.m)}

static const struct stmmac_stats stmmac_gstrings_stats[] = {
	/* Transmit errors */
	STMMAC_STAT(tx_underflow),
	STMMAC_STAT(tx_carrier),
	STMMAC_STAT(tx_losscarrier),
	STMMAC_STAT(vlan_tag),
	STMMAC_STAT(tx_deferred),
	STMMAC_STAT(tx_vlan),
	STMMAC_STAT(tx_jabber),
	STMMAC_STAT(tx_frame_flushed),
	STMMAC_STAT(tx_payload_error),
	STMMAC_STAT(tx_ip_header_error),
	/* Receive errors */
	STMMAC_STAT(rx_desc),
	STMMAC_STAT(sa_filter_fail),
	STMMAC_STAT(overflow_error),
	STMMAC_STAT(ipc_csum_error),
	STMMAC_STAT(rx_collision),
	STMMAC_STAT(rx_crc),
	STMMAC_STAT(dribbling_bit),
	STMMAC_STAT(rx_length),
	STMMAC_STAT(rx_mii),
	STMMAC_STAT(rx_multicast),
	STMMAC_STAT(rx_gmac_overflow),
	STMMAC_STAT(rx_watchdog),
	STMMAC_STAT(da_rx_filter_fail),
	STMMAC_STAT(sa_rx_filter_fail),
	STMMAC_STAT(rx_missed_cntr),
	STMMAC_STAT(rx_overflow_cntr),
	STMMAC_STAT(rx_vlan),
	/* Tx/Rx IRQ error info */
	STMMAC_STAT(tx_undeflow_irq),
	STMMAC_STAT(tx_process_stopped_irq),
	STMMAC_STAT(tx_jabber_irq),
	STMMAC_STAT(rx_overflow_irq),
	STMMAC_STAT(rx_buf_unav_irq),
	STMMAC_STAT(rx_process_stopped_irq),
	STMMAC_STAT(rx_watchdog_irq),
	STMMAC_STAT(tx_early_irq),
	STMMAC_STAT(fatal_bus_error_irq),
	/* Tx/Rx IRQ Events */
	STMMAC_STAT(rx_early_irq),
	STMMAC_STAT(threshold),
	STMMAC_STAT(tx_pkt_n),
	STMMAC_STAT(rx_pkt_n),
	STMMAC_STAT(normal_irq_n),
	STMMAC_STAT(rx_normal_irq_n),
	STMMAC_STAT(napi_poll),
	STMMAC_STAT(tx_normal_irq_n),
	STMMAC_STAT(tx_clean),
	STMMAC_STAT(tx_reset_ic_bit),
	STMMAC_STAT(irq_receive_pmt_irq_n),
	/* MMC info */
	STMMAC_STAT(mmc_tx_irq_n),
	STMMAC_STAT(mmc_rx_irq_n),
	STMMAC_STAT(mmc_rx_csum_offload_irq_n),
	/* EEE */
	STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
	STMMAC_STAT(phy_eee_wakeup_error_n),
	/* Extended RDES status */
	STMMAC_STAT(ip_hdr_err),
	STMMAC_STAT(ip_payload_err),
	STMMAC_STAT(ip_csum_bypassed),
	STMMAC_STAT(ipv4_pkt_rcvd),
	STMMAC_STAT(ipv6_pkt_rcvd),
	STMMAC_STAT(rx_msg_type_ext_no_ptp),
	STMMAC_STAT(rx_msg_type_sync),
	STMMAC_STAT(rx_msg_type_follow_up),
	STMMAC_STAT(rx_msg_type_delay_req),
	STMMAC_STAT(rx_msg_type_delay_resp),
	STMMAC_STAT(rx_msg_type_pdelay_req),
	STMMAC_STAT(rx_msg_type_pdelay_resp),
	STMMAC_STAT(rx_msg_type_pdelay_follow_up),
	STMMAC_STAT(ptp_frame_type),
	STMMAC_STAT(ptp_ver),
	STMMAC_STAT(timestamp_dropped),
	STMMAC_STAT(av_pkt_rcvd),
	STMMAC_STAT(av_tagged_pkt_rcvd),
	STMMAC_STAT(vlan_tag_priority_val),
	STMMAC_STAT(l3_filter_match),
	STMMAC_STAT(l4_filter_match),
	STMMAC_STAT(l3_l4_filter_no_match),
	/* PCS */
	STMMAC_STAT(irq_pcs_ane_n),
	STMMAC_STAT(irq_pcs_link_n),
	STMMAC_STAT(irq_rgmii_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
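
/* Descriptive note: each STMMAC_STAT() entry above records the counter name
 * together with the size and offset of the backing field (priv->xstats.m)
 * inside struct stmmac_priv, so that stmmac_get_ethtool_stats() below can
 * fetch every value generically as either a u32 or a u64.
 */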

/* HW MAC Management counters (if supported) */
#define STMMAC_MMC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_counters, m),	\
	offsetof(struct stmmac_priv, mmc.m)}

static const struct stmmac_stats stmmac_mmc[] = {
	STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_tx_framecount_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_unicast_gb),
	STMMAC_MMC_STAT(mmc_tx_multicast_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
	STMMAC_MMC_STAT(mmc_tx_underflow_error),
	STMMAC_MMC_STAT(mmc_tx_singlecol_g),
	STMMAC_MMC_STAT(mmc_tx_multicol_g),
	STMMAC_MMC_STAT(mmc_tx_deferred),
	STMMAC_MMC_STAT(mmc_tx_latecol),
	STMMAC_MMC_STAT(mmc_tx_exesscol),
	STMMAC_MMC_STAT(mmc_tx_carrier_error),
	STMMAC_MMC_STAT(mmc_tx_octetcount_g),
	STMMAC_MMC_STAT(mmc_tx_framecount_g),
	STMMAC_MMC_STAT(mmc_tx_excessdef),
	STMMAC_MMC_STAT(mmc_tx_pause_frame),
	STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
	STMMAC_MMC_STAT(mmc_rx_framecount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_g),
	STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_rx_crc_errror),
	STMMAC_MMC_STAT(mmc_rx_align_error),
	STMMAC_MMC_STAT(mmc_rx_run_error),
	STMMAC_MMC_STAT(mmc_rx_jabber_error),
	STMMAC_MMC_STAT(mmc_rx_undersize_g),
	STMMAC_MMC_STAT(mmc_rx_oversize_g),
	STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_unicast_g),
	STMMAC_MMC_STAT(mmc_rx_length_error),
	STMMAC_MMC_STAT(mmc_rx_autofrangetype),
	STMMAC_MMC_STAT(mmc_rx_pause_frames),
	STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
	STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
	STMMAC_MMC_STAT(mmc_rx_watchdog_error),
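	/* MMC IPC (Rx IP checksum offload engine) counters */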
	STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
	STMMAC_MMC_STAT(mmc_rx_udp_gd),
	STMMAC_MMC_STAT(mmc_rx_udp_err),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd),
	STMMAC_MMC_STAT(mmc_rx_tcp_err),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd),
	STMMAC_MMC_STAT(mmc_rx_icmp_err),
	STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)

static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *info)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->has_gmac)
		strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
	else
		strlcpy(info->driver, MAC100_ETHTOOL_NAME,
			sizeof(info->driver));

	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int stmmac_ethtool_getsettings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phy = priv->phydev;
	int rc;

	if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
		struct rgmii_adv adv;

		if (!priv->xstats.pcs_link) {
			ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
			cmd->duplex = DUPLEX_UNKNOWN;
			return 0;
		}
		cmd->duplex = priv->xstats.pcs_duplex;

		ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);

		/* Get and convert ADV/LP_ADV from the HW AN registers */
		if (priv->hw->mac->get_adv)
			priv->hw->mac->get_adv(priv->ioaddr, &adv);
		else
			return -EOPNOTSUPP;	/* should never happen indeed */

		/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */

		if (adv.pause & STMMAC_PCS_PAUSE)
			cmd->advertising |= ADVERTISED_Pause;
		if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
			cmd->advertising |= ADVERTISED_Asym_Pause;
		if (adv.lp_pause & STMMAC_PCS_PAUSE)
			cmd->lp_advertising |= ADVERTISED_Pause;
		if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
			cmd->lp_advertising |= ADVERTISED_Asym_Pause;

		/* Reg49[3] always set because ANE is always supported */
		cmd->autoneg = AUTONEG_ENABLE;
		cmd->supported |= SUPPORTED_Autoneg;
		cmd->advertising |= ADVERTISED_Autoneg;
		cmd->lp_advertising |= ADVERTISED_Autoneg;

		if (adv.duplex) {
			cmd->supported |= (SUPPORTED_1000baseT_Full |
					   SUPPORTED_100baseT_Full |
					   SUPPORTED_10baseT_Full);
			cmd->advertising |= (ADVERTISED_1000baseT_Full |
					     ADVERTISED_100baseT_Full |
					     ADVERTISED_10baseT_Full);
		} else {
			cmd->supported |= (SUPPORTED_1000baseT_Half |
					   SUPPORTED_100baseT_Half |
					   SUPPORTED_10baseT_Half);
			cmd->advertising |= (ADVERTISED_1000baseT_Half |
					     ADVERTISED_100baseT_Half |
					     ADVERTISED_10baseT_Half);
		}
		if (adv.lp_duplex)
			cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full);
		else
			cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
						ADVERTISED_100baseT_Half |
						ADVERTISED_10baseT_Half);
		cmd->port = PORT_OTHER;

		return 0;
	}

	if (phy == NULL) {
		pr_err("%s: %s: PHY is not registered\n",
		       __func__, dev->name);
		return -ENODEV;
	}
	if (!netif_running(dev)) {
		pr_err("%s: interface is disabled: we cannot track link speed / duplex setting\n",
		       dev->name);
		return -EBUSY;
	}
	cmd->transceiver = XCVR_INTERNAL;
	spin_lock_irq(&priv->lock);
	rc = phy_ethtool_gset(phy, cmd);
	spin_unlock_irq(&priv->lock);
	return rc;
}

static int stmmac_ethtool_setsettings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phy = priv->phydev;
	int rc;

	if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
		u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;

		/* Only support ANE */
		if (cmd->autoneg != AUTONEG_ENABLE)
			return -EINVAL;

		if (cmd->autoneg == AUTONEG_ENABLE) {
			mask &= (ADVERTISED_1000baseT_Half |
				 ADVERTISED_1000baseT_Full |
				 ADVERTISED_100baseT_Half |
				 ADVERTISED_100baseT_Full |
				 ADVERTISED_10baseT_Half |
				 ADVERTISED_10baseT_Full);

			spin_lock(&priv->lock);
			if (priv->hw->mac->ctrl_ane)
				priv->hw->mac->ctrl_ane(priv->ioaddr, 1);
			spin_unlock(&priv->lock);
		}

		return 0;
	}

	spin_lock(&priv->lock);
	rc = phy_ethtool_sset(phy, cmd);
	spin_unlock(&priv->lock);

	return rc;
}

static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	return priv->msg_enable;
}

static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	priv->msg_enable = level;
}

static int stmmac_check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EBUSY;
	return 0;
}

static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}

static void stmmac_ethtool_gregs(struct net_device *dev,
				 struct ethtool_regs *regs, void *space)
{
	int i;
	u32 *reg_space = (u32 *) space;

	struct stmmac_priv *priv = netdev_priv(dev);

	memset(reg_space, 0x0, REG_SPACE_SIZE);

	if (!priv->plat->has_gmac) {
		/* MAC registers */
		for (i = 0; i < 12; i++)
			reg_space[i] = readl(priv->ioaddr + (i * 4));
		/* DMA registers */
		for (i = 0; i < 9; i++)
			reg_space[i + 12] =
			    readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
		reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
		reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
	} else {
		/* MAC registers */
		for (i = 0; i < 55; i++)
			reg_space[i] = readl(priv->ioaddr + (i * 4));
		/* DMA registers */
		for (i = 0; i < 22; i++)
			reg_space[i + 55] =
			    readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
	}
}
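
/* Descriptive note: the two handlers above back "ethtool -d ethX". User space
 * first asks for the dump size via get_regs_len() (REG_SPACE_SIZE bytes) and
 * then receives the snapshot filled in by stmmac_ethtool_gregs(), i.e. the MAC
 * register block from offset 0 followed by the DMA registers starting at
 * DMA_BUS_MODE.
 */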

static void
stmmac_get_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	if (priv->pcs)	/* FIXME */
		return;

	spin_lock(&priv->lock);

	pause->rx_pause = 0;
	pause->tx_pause = 0;
	pause->autoneg = priv->phydev->autoneg;

	if (priv->flow_ctrl & FLOW_RX)
		pause->rx_pause = 1;
	if (priv->flow_ctrl & FLOW_TX)
		pause->tx_pause = 1;

	spin_unlock(&priv->lock);
}

static int
stmmac_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	struct phy_device *phy = priv->phydev;
	int new_pause = FLOW_OFF;
	int ret = 0;

	if (priv->pcs)	/* FIXME */
		return -EOPNOTSUPP;

	spin_lock(&priv->lock);

	if (pause->rx_pause)
		new_pause |= FLOW_RX;
	if (pause->tx_pause)
		new_pause |= FLOW_TX;

	priv->flow_ctrl = new_pause;
	phy->autoneg = pause->autoneg;

	if (phy->autoneg) {
		if (netif_running(netdev))
			ret = phy_start_aneg(phy);
	} else
		priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
					 priv->flow_ctrl, priv->pause);
	spin_unlock(&priv->lock);
	return ret;
}

static void stmmac_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *dummy, u64 *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int i, j = 0;

	/* Update the DMA HW counters for dwmac10/100 */
	if (!priv->plat->has_gmac)
		priv->hw->dma->dma_diagnostic_fr(&dev->stats,
						 (void *) &priv->xstats,
						 priv->ioaddr);
	else {
		/* If supported, for new GMAC chips expose the MMC counters */
		if (priv->dma_cap.rmon) {
			dwmac_mmc_read(priv->ioaddr, &priv->mmc);

			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				char *p;
				p = (char *)priv + stmmac_mmc[i].stat_offset;

				data[j++] = (stmmac_mmc[i].sizeof_stat ==
					     sizeof(u64)) ? (*(u64 *)p) :
					     (*(u32 *)p);
			}
		}
		if (priv->eee_enabled) {
			int val = phy_get_eee_err(priv->phydev);
			if (val)
				priv->xstats.phy_eee_wakeup_error_n = val;
		}
	}
	for (i = 0; i < STMMAC_STATS_LEN; i++) {
		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
		data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
			     sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
	}
}

static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	int len;

	switch (sset) {
	case ETH_SS_STATS:
		len = STMMAC_STATS_LEN;

		if (priv->dma_cap.rmon)
			len += STMMAC_MMC_STATS_LEN;

		return len;
	default:
		return -EOPNOTSUPP;
	}
}

static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;
	u8 *p = data;
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		if (priv->dma_cap.rmon)
			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				memcpy(p, stmmac_mmc[i].stat_string,
				       ETH_GSTRING_LEN);
				p += ETH_GSTRING_LEN;
			}
		for (i = 0; i < STMMAC_STATS_LEN; i++) {
			memcpy(p, stmmac_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}
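
/* Descriptive note: stmmac_get_strings() and stmmac_get_ethtool_stats() must
 * emit their entries in the same order (the MMC counters first, only when
 * priv->dma_cap.rmon is set, then the stmmac_gstrings_stats table), otherwise
 * "ethtool -S" would pair names with the wrong values.
 */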

/* Currently only support WOL through Magic packet. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->lock);
	if (device_can_wakeup(priv->device)) {
		wol->supported = WAKE_MAGIC | WAKE_UCAST;
		wol->wolopts = priv->wolopts;
	}
	spin_unlock_irq(&priv->lock);
}

static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 support = WAKE_MAGIC | WAKE_UCAST;

	/* By default almost all GMAC devices support WoL via magic frame,
	 * but we disable it if the HW capability register shows no support
	 * for pmt_magic_frame.
	 */
	if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
		wol->wolopts &= ~WAKE_MAGIC;

	if (!device_can_wakeup(priv->device))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	if (wol->wolopts) {
		pr_info("stmmac: wakeup enable\n");
		device_set_wakeup_enable(priv->device, 1);
		enable_irq_wake(priv->wol_irq);
	} else {
		device_set_wakeup_enable(priv->device, 0);
		disable_irq_wake(priv->wol_irq);
	}

	spin_lock_irq(&priv->lock);
	priv->wolopts = wol->wolopts;
	spin_unlock_irq(&priv->lock);

	return 0;
}

static int stmmac_ethtool_op_get_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->dma_cap.eee)
		return -EOPNOTSUPP;

	edata->eee_enabled = priv->eee_enabled;
	edata->eee_active = priv->eee_active;
	edata->tx_lpi_timer = priv->tx_lpi_timer;

	return phy_ethtool_get_eee(priv->phydev, edata);
}
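
/* Descriptive note: the EEE handlers above and below back
 * "ethtool --show-eee" and "ethtool --set-eee"; eee_enabled/eee_active mirror
 * the driver state, while the PHY-level advertisement is delegated to
 * phy_ethtool_get_eee() and phy_ethtool_set_eee().
 */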

static int stmmac_ethtool_op_set_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->eee_enabled = edata->eee_enabled;

	if (!priv->eee_enabled)
		stmmac_disable_eee_mode(priv);
	else {
		/* We are asking to enable EEE, but it is safe to verify
		 * this by invoking the eee_init function.
		 * In case of failure it will return an error.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
		if (!priv->eee_enabled)
			return -EOPNOTSUPP;

		/* Do not change tx_lpi_timer in case of failure */
		priv->tx_lpi_timer = edata->tx_lpi_timer;
	}

	return phy_ethtool_set_eee(priv->phydev, edata);
}

static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
	unsigned long clk = clk_get_rate(priv->stmmac_clk);

	if (!clk)
		return 0;

	return (usec * (clk / 1000000)) / 256;
}

static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
	unsigned long clk = clk_get_rate(priv->stmmac_clk);

	if (!clk)
		return 0;

	return (riwt * 256) / (clk / 1000000);
}
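
/* Descriptive note: the helpers above convert between microseconds and the
 * DMA receive interrupt watchdog (RIWT) value, which the conversion treats as
 * counting in units of 256 CSR clock cycles.  As a purely illustrative
 * example, assuming a 250 MHz clock from clk_get_rate(): 100 us maps to a
 * RIWT of 100 * 250 / 256 = 97, and a RIWT of 100 maps back to
 * 100 * 256 / 250 = 102 us.
 */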

static int stmmac_get_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	ec->tx_coalesce_usecs = priv->tx_coal_timer;
	ec->tx_max_coalesced_frames = priv->tx_coal_frames;

	if (priv->use_riwt)
		ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);

	return 0;
}

static int stmmac_set_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int rx_riwt;

	/* Check not supported parameters */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if (ec->rx_coalesce_usecs == 0)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) ||
	    (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
		return -EINVAL;

	rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);

	if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
		return -EINVAL;
	else if (!priv->use_riwt)
		return -EOPNOTSUPP;

	/* Only copy relevant parameters, ignore all others. */
	priv->tx_coal_frames = ec->tx_max_coalesced_frames;
	priv->tx_coal_timer = ec->tx_coalesce_usecs;
	priv->rx_riwt = rx_riwt;
	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);

	return 0;
}

static int stmmac_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;

		if (priv->ptp_clock)
			info->phc_index = ptp_clock_index(priv->ptp_clock);

		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

		info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_ALL));
		return 0;
	} else
		return ethtool_op_get_ts_info(dev, info);
}

static const struct ethtool_ops stmmac_ethtool_ops = {
	.begin = stmmac_check_if_running,
	.get_drvinfo = stmmac_ethtool_getdrvinfo,
	.get_settings = stmmac_ethtool_getsettings,
	.set_settings = stmmac_ethtool_setsettings,
	.get_msglevel = stmmac_ethtool_getmsglevel,
	.set_msglevel = stmmac_ethtool_setmsglevel,
	.get_regs = stmmac_ethtool_gregs,
	.get_regs_len = stmmac_ethtool_get_regs_len,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = stmmac_get_pauseparam,
	.set_pauseparam = stmmac_set_pauseparam,
	.get_ethtool_stats = stmmac_get_ethtool_stats,
	.get_strings = stmmac_get_strings,
	.get_wol = stmmac_get_wol,
	.set_wol = stmmac_set_wol,
	.get_eee = stmmac_ethtool_op_get_eee,
	.set_eee = stmmac_ethtool_op_set_eee,
	.get_sset_count = stmmac_get_sset_count,
	.get_ts_info = stmmac_get_ts_info,
	.get_coalesce = stmmac_get_coalesce,
	.set_coalesce = stmmac_set_coalesce,
};

void stmmac_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
}