/*******************************************************************************
  STMMAC Ethtool support

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/net_tstamp.h>
#include <asm/io.h>

#include "stmmac.h"
#include "dwmac_dma.h"

#define REG_SPACE_SIZE	0x1060
#define MAC100_ETHTOOL_NAME	"st_mac100"
#define GMAC_ETHTOOL_NAME	"st_gmac"

#define ETHTOOL_DMA_OFFSET	55

struct stmmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define STMMAC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
	offsetof(struct stmmac_priv, xstats.m)}

static const struct stmmac_stats stmmac_gstrings_stats[] = {
	/* Transmit errors */
	STMMAC_STAT(tx_underflow),
	STMMAC_STAT(tx_carrier),
	STMMAC_STAT(tx_losscarrier),
	STMMAC_STAT(vlan_tag),
	STMMAC_STAT(tx_deferred),
	STMMAC_STAT(tx_vlan),
	STMMAC_STAT(tx_jabber),
	STMMAC_STAT(tx_frame_flushed),
	STMMAC_STAT(tx_payload_error),
	STMMAC_STAT(tx_ip_header_error),
	/* Receive errors */
	STMMAC_STAT(rx_desc),
	STMMAC_STAT(sa_filter_fail),
	STMMAC_STAT(overflow_error),
	STMMAC_STAT(ipc_csum_error),
	STMMAC_STAT(rx_collision),
	STMMAC_STAT(rx_crc_errors),
	STMMAC_STAT(dribbling_bit),
	STMMAC_STAT(rx_length),
	STMMAC_STAT(rx_mii),
	STMMAC_STAT(rx_multicast),
	STMMAC_STAT(rx_gmac_overflow),
	STMMAC_STAT(rx_watchdog),
	STMMAC_STAT(da_rx_filter_fail),
	STMMAC_STAT(sa_rx_filter_fail),
	STMMAC_STAT(rx_missed_cntr),
	STMMAC_STAT(rx_overflow_cntr),
	STMMAC_STAT(rx_vlan),
	/* Tx/Rx IRQ error info */
	STMMAC_STAT(tx_undeflow_irq),
	STMMAC_STAT(tx_process_stopped_irq),
	STMMAC_STAT(tx_jabber_irq),
	STMMAC_STAT(rx_overflow_irq),
	STMMAC_STAT(rx_buf_unav_irq),
	STMMAC_STAT(rx_process_stopped_irq),
	STMMAC_STAT(rx_watchdog_irq),
	STMMAC_STAT(tx_early_irq),
	STMMAC_STAT(fatal_bus_error_irq),
	/* Tx/Rx IRQ Events */
	STMMAC_STAT(rx_early_irq),
	STMMAC_STAT(threshold),
	STMMAC_STAT(tx_pkt_n),
	STMMAC_STAT(rx_pkt_n),
	STMMAC_STAT(normal_irq_n),
	STMMAC_STAT(rx_normal_irq_n),
	STMMAC_STAT(napi_poll),
	STMMAC_STAT(tx_normal_irq_n),
	STMMAC_STAT(tx_clean),
	STMMAC_STAT(tx_set_ic_bit),
	STMMAC_STAT(irq_receive_pmt_irq_n),
	/* MMC info */
	STMMAC_STAT(mmc_tx_irq_n),
	STMMAC_STAT(mmc_rx_irq_n),
	STMMAC_STAT(mmc_rx_csum_offload_irq_n),
	/* EEE */
	STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
	STMMAC_STAT(phy_eee_wakeup_error_n),
	/* Extended RDES status */
	STMMAC_STAT(ip_hdr_err),
	STMMAC_STAT(ip_payload_err),
	STMMAC_STAT(ip_csum_bypassed),
	STMMAC_STAT(ipv4_pkt_rcvd),
	STMMAC_STAT(ipv6_pkt_rcvd),
	STMMAC_STAT(no_ptp_rx_msg_type_ext),
	STMMAC_STAT(ptp_rx_msg_type_sync),
	STMMAC_STAT(ptp_rx_msg_type_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_delay_req),
	STMMAC_STAT(ptp_rx_msg_type_delay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_announce),
	STMMAC_STAT(ptp_rx_msg_type_management),
	STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
	STMMAC_STAT(ptp_frame_type),
	STMMAC_STAT(ptp_ver),
	STMMAC_STAT(timestamp_dropped),
	STMMAC_STAT(av_pkt_rcvd),
	STMMAC_STAT(av_tagged_pkt_rcvd),
	STMMAC_STAT(vlan_tag_priority_val),
	STMMAC_STAT(l3_filter_match),
	STMMAC_STAT(l4_filter_match),
	STMMAC_STAT(l3_l4_filter_no_match),
	/* PCS */
	STMMAC_STAT(irq_pcs_ane_n),
	STMMAC_STAT(irq_pcs_link_n),
	STMMAC_STAT(irq_rgmii_n),
	/* DEBUG */
	STMMAC_STAT(mtl_tx_status_fifo_full),
	STMMAC_STAT(mtl_tx_fifo_not_empty),
	STMMAC_STAT(mmtl_fifo_ctrl),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_write),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_read),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle),
	STMMAC_STAT(mac_tx_in_pause),
	STMMAC_STAT(mac_tx_frame_ctrl_xfer),
	STMMAC_STAT(mac_tx_frame_ctrl_idle),
	STMMAC_STAT(mac_tx_frame_ctrl_wait),
	STMMAC_STAT(mac_tx_frame_ctrl_pause),
	STMMAC_STAT(mac_gmii_tx_proto_engine),
	STMMAC_STAT(mtl_rx_fifo_fill_level_full),
	STMMAC_STAT(mtl_rx_fifo_fill_above_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_below_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_level_empty),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_status),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle),
	STMMAC_STAT(mtl_rx_fifo_ctrl_active),
	STMMAC_STAT(mac_rx_frame_ctrl_fifo),
	STMMAC_STAT(mac_gmii_rx_proto_engine),
	/* TSO */
	STMMAC_STAT(tx_tso_frames),
	STMMAC_STAT(tx_tso_nfrags),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)

/* HW MAC Management counters (if supported) */
#define STMMAC_MMC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_counters, m),	\
	offsetof(struct stmmac_priv, mmc.m)}

static const struct stmmac_stats stmmac_mmc[] = {
	STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_tx_framecount_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_unicast_gb),
	STMMAC_MMC_STAT(mmc_tx_multicast_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
	STMMAC_MMC_STAT(mmc_tx_underflow_error),
	STMMAC_MMC_STAT(mmc_tx_singlecol_g),
	STMMAC_MMC_STAT(mmc_tx_multicol_g),
	STMMAC_MMC_STAT(mmc_tx_deferred),
	STMMAC_MMC_STAT(mmc_tx_latecol),
	STMMAC_MMC_STAT(mmc_tx_exesscol),
	STMMAC_MMC_STAT(mmc_tx_carrier_error),
	STMMAC_MMC_STAT(mmc_tx_octetcount_g),
	STMMAC_MMC_STAT(mmc_tx_framecount_g),
	STMMAC_MMC_STAT(mmc_tx_excessdef),
	STMMAC_MMC_STAT(mmc_tx_pause_frame),
	STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
	STMMAC_MMC_STAT(mmc_rx_framecount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_g),
	STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_rx_crc_error),
	STMMAC_MMC_STAT(mmc_rx_align_error),
	STMMAC_MMC_STAT(mmc_rx_run_error),
	STMMAC_MMC_STAT(mmc_rx_jabber_error),
	STMMAC_MMC_STAT(mmc_rx_undersize_g),
	STMMAC_MMC_STAT(mmc_rx_oversize_g),
	STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_unicast_g),
	STMMAC_MMC_STAT(mmc_rx_length_error),
	STMMAC_MMC_STAT(mmc_rx_autofrangetype),
	STMMAC_MMC_STAT(mmc_rx_pause_frames),
	STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
	STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
	STMMAC_MMC_STAT(mmc_rx_watchdog_error),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
	STMMAC_MMC_STAT(mmc_rx_udp_gd),
	STMMAC_MMC_STAT(mmc_rx_udp_err),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd),
	STMMAC_MMC_STAT(mmc_rx_tcp_err),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd),
	STMMAC_MMC_STAT(mmc_rx_icmp_err),
	STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)

static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *info)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->has_gmac || priv->plat->has_gmac4)
		strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
	else
		strlcpy(info->driver, MAC100_ETHTOOL_NAME,
			sizeof(info->driver));

	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
					     struct ethtool_link_ksettings *cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phy = dev->phydev;

	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
	    priv->hw->pcs & STMMAC_PCS_SGMII) {
		struct rgmii_adv adv;
		u32 supported, advertising, lp_advertising;

		if (!priv->xstats.pcs_link) {
			cmd->base.speed = SPEED_UNKNOWN;
			cmd->base.duplex = DUPLEX_UNKNOWN;
			return 0;
		}
		cmd->base.duplex = priv->xstats.pcs_duplex;

		cmd->base.speed = priv->xstats.pcs_speed;

		/* Get and convert ADV/LP_ADV from the HW AN registers */
		if (!priv->hw->mac->pcs_get_adv_lp)
			return -EOPNOTSUPP;	/* should never happen indeed */

		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);

		/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */

		ethtool_convert_link_mode_to_legacy_u32(
			&supported, cmd->link_modes.supported);
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);
		ethtool_convert_link_mode_to_legacy_u32(
			&lp_advertising, cmd->link_modes.lp_advertising);

		if (adv.pause & STMMAC_PCS_PAUSE)
			advertising |= ADVERTISED_Pause;
		if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
			advertising |= ADVERTISED_Asym_Pause;
		if (adv.lp_pause & STMMAC_PCS_PAUSE)
			lp_advertising |= ADVERTISED_Pause;
		if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
			lp_advertising |= ADVERTISED_Asym_Pause;

		/* Reg49[3] always set because ANE is always supported */
		cmd->base.autoneg = AUTONEG_ENABLE;
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		lp_advertising |= ADVERTISED_Autoneg;

		if (adv.duplex) {
			supported |= (SUPPORTED_1000baseT_Full |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_10baseT_Full);
			advertising |= (ADVERTISED_1000baseT_Full |
					ADVERTISED_100baseT_Full |
					ADVERTISED_10baseT_Full);
		} else {
			supported |= (SUPPORTED_1000baseT_Half |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_10baseT_Half);
			advertising |= (ADVERTISED_1000baseT_Half |
					ADVERTISED_100baseT_Half |
					ADVERTISED_10baseT_Half);
		}
		if (adv.lp_duplex)
			lp_advertising |= (ADVERTISED_1000baseT_Full |
					   ADVERTISED_100baseT_Full |
					   ADVERTISED_10baseT_Full);
		else
			lp_advertising |= (ADVERTISED_1000baseT_Half |
					   ADVERTISED_100baseT_Half |
					   ADVERTISED_10baseT_Half);
		cmd->base.port = PORT_OTHER;

		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising, lp_advertising);

		return 0;
	}

	if (phy == NULL) {
		pr_err("%s: %s: PHY is not registered\n",
		       __func__, dev->name);
		return -ENODEV;
	}
	if (!netif_running(dev)) {
		pr_err("%s: interface is disabled: we cannot track "
		       "link speed / duplex setting\n", dev->name);
		return -EBUSY;
	}
	phy_ethtool_ksettings_get(phy, cmd);
	return 0;
}

static int
stmmac_ethtool_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phy = dev->phydev;
	int rc;

	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
	    priv->hw->pcs & STMMAC_PCS_SGMII) {
		u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;

		/* Only support ANE */
		if (cmd->base.autoneg != AUTONEG_ENABLE)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		spin_lock(&priv->lock);

		if (priv->hw->mac->pcs_ctrl_ane)
			priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
						    priv->hw->ps, 0);

		spin_unlock(&priv->lock);

		return 0;
	}

	rc = phy_ethtool_ksettings_set(phy, cmd);

	return rc;
}

static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	return priv->msg_enable;
}

static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int stmmac_check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EBUSY;
	return 0;
}

static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}

static void stmmac_ethtool_gregs(struct net_device *dev,
				 struct ethtool_regs *regs, void *space)
{
	u32 *reg_space = (u32 *) space;

	struct stmmac_priv *priv = netdev_priv(dev);

	memset(reg_space, 0x0, REG_SPACE_SIZE);

	priv->hw->mac->dump_regs(priv->hw, reg_space);
	priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
	/* Copy DMA registers to where ethtool expects them */
	memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
	       NUM_DWMAC1000_DMA_REGS * 4);
}

static void
stmmac_get_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	pause->rx_pause = 0;
	pause->tx_pause = 0;

	if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
		struct rgmii_adv adv_lp;

		pause->autoneg = 1;
		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
		if (!adv_lp.pause)
			return;
	} else {
		if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
		    !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
			return;
	}

	pause->autoneg = netdev->phydev->autoneg;

	if (priv->flow_ctrl & FLOW_RX)
		pause->rx_pause = 1;
	if (priv->flow_ctrl & FLOW_TX)
		pause->tx_pause = 1;
}

static int
stmmac_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	struct phy_device *phy = netdev->phydev;
	int new_pause = FLOW_OFF;

	if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
		struct rgmii_adv adv_lp;

		pause->autoneg = 1;
		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
		if (!adv_lp.pause)
			return -EOPNOTSUPP;
	} else {
		if (!(phy->supported & SUPPORTED_Pause) ||
		    !(phy->supported & SUPPORTED_Asym_Pause))
			return -EOPNOTSUPP;
	}

	if (pause->rx_pause)
		new_pause |= FLOW_RX;
	if (pause->tx_pause)
		new_pause |= FLOW_TX;

	priv->flow_ctrl = new_pause;
	phy->autoneg = pause->autoneg;

	if (phy->autoneg) {
		if (netif_running(netdev))
			return phy_start_aneg(phy);
	}

	priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
	return 0;
}

static void stmmac_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *dummy, u64 *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	int i, j = 0;

	/* Update the DMA HW counters for dwmac10/100 */
	if (priv->hw->dma->dma_diagnostic_fr)
		priv->hw->dma->dma_diagnostic_fr(&dev->stats,
						 (void *) &priv->xstats,
						 priv->ioaddr);
	else {
		/* If supported, for new GMAC chips expose the MMC counters */
		if (priv->dma_cap.rmon) {
			dwmac_mmc_read(priv->mmcaddr, &priv->mmc);

			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				char *p;
				p = (char *)priv + stmmac_mmc[i].stat_offset;

				data[j++] = (stmmac_mmc[i].sizeof_stat ==
					     sizeof(u64)) ? (*(u64 *)p) :
					     (*(u32 *)p);
			}
		}
		if (priv->eee_enabled) {
			int val = phy_get_eee_err(dev->phydev);
			if (val)
				priv->xstats.phy_eee_wakeup_error_n = val;
		}

		if ((priv->hw->mac->debug) &&
		    (priv->synopsys_id >= DWMAC_CORE_3_50))
			priv->hw->mac->debug(priv->ioaddr,
					     (void *)&priv->xstats,
					     rx_queues_count, tx_queues_count);
	}
	for (i = 0; i < STMMAC_STATS_LEN; i++) {
		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
		data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
			     sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
	}
}

static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	int len;

	switch (sset) {
	case ETH_SS_STATS:
		len = STMMAC_STATS_LEN;

		if (priv->dma_cap.rmon)
			len += STMMAC_MMC_STATS_LEN;

		return len;
	default:
		return -EOPNOTSUPP;
	}
}

static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;
	u8 *p = data;
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		if (priv->dma_cap.rmon)
			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				memcpy(p, stmmac_mmc[i].stat_string,
				       ETH_GSTRING_LEN);
				p += ETH_GSTRING_LEN;
			}
		for (i = 0; i < STMMAC_STATS_LEN; i++) {
			memcpy(p, stmmac_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Currently only support WOL through Magic packet. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->lock);
	if (device_can_wakeup(priv->device)) {
		wol->supported = WAKE_MAGIC | WAKE_UCAST;
		wol->wolopts = priv->wolopts;
	}
	spin_unlock_irq(&priv->lock);
}

static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 support = WAKE_MAGIC | WAKE_UCAST;

	/* By default almost all GMAC devices support the WoL via
	 * magic frame but we can disable it if the HW capability
	 * register shows no support for pmt_magic_frame. */
	if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
		wol->wolopts &= ~WAKE_MAGIC;

	if (!device_can_wakeup(priv->device))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	if (wol->wolopts) {
		pr_info("stmmac: wakeup enable\n");
		device_set_wakeup_enable(priv->device, 1);
		enable_irq_wake(priv->wol_irq);
	} else {
		device_set_wakeup_enable(priv->device, 0);
		disable_irq_wake(priv->wol_irq);
	}

	spin_lock_irq(&priv->lock);
	priv->wolopts = wol->wolopts;
	spin_unlock_irq(&priv->lock);

	return 0;
}

static int stmmac_ethtool_op_get_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->dma_cap.eee)
		return -EOPNOTSUPP;

	edata->eee_enabled = priv->eee_enabled;
	edata->eee_active = priv->eee_active;
	edata->tx_lpi_timer = priv->tx_lpi_timer;

	return phy_ethtool_get_eee(dev->phydev, edata);
}

static int stmmac_ethtool_op_set_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->eee_enabled = edata->eee_enabled;

	if (!priv->eee_enabled)
		stmmac_disable_eee_mode(priv);
	else {
		/* We are asking for enabling the EEE but it is safe
		 * to verify all by invoking the eee_init function.
		 * In case of failure it will return an error.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
		if (!priv->eee_enabled)
			return -EOPNOTSUPP;

		/* Do not change tx_lpi_timer in case of failure */
		priv->tx_lpi_timer = edata->tx_lpi_timer;
	}

	return phy_ethtool_set_eee(dev->phydev, edata);
}

/* Convert a microsecond value into the units used by the RX watchdog (RIWT)
 * register: one RIWT unit is 256 cycles of the CSR clock (stmmac_clk).
 * stmmac_riwt2usec() is the inverse conversion, used when reporting the
 * current RX coalescing setting back to ethtool.
 */
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);

	if (!clk)
		return 0;

	return (usec * (clk / 1000000)) / 256;
}

static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);

	if (!clk)
		return 0;

	return (riwt * 256) / (clk / 1000000);
}

static int stmmac_get_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	ec->tx_coalesce_usecs = priv->tx_coal_timer;
	ec->tx_max_coalesced_frames = priv->tx_coal_frames;

	if (priv->use_riwt)
		ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);

	return 0;
}

static int stmmac_set_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int rx_riwt;

	/* Check not supported parameters */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if (ec->rx_coalesce_usecs == 0)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
	    (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
		return -EINVAL;

	rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);

	if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
		return -EINVAL;
	else if (!priv->use_riwt)
		return -EOPNOTSUPP;

	/* Only copy relevant parameters, ignore all others. */
	priv->tx_coal_frames = ec->tx_max_coalesced_frames;
	priv->tx_coal_timer = ec->tx_coalesce_usecs;
	priv->rx_riwt = rx_riwt;
	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);

	return 0;
}

static int stmmac_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;

		if (priv->ptp_clock)
			info->phc_index = ptp_clock_index(priv->ptp_clock);

		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

		info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_ALL));
		return 0;
	} else
		return ethtool_op_get_ts_info(dev, info);
}

static int stmmac_get_tunable(struct net_device *dev,
			      const struct ethtool_tunable *tuna, void *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = priv->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int stmmac_set_tunable(struct net_device *dev,
			      const struct ethtool_tunable *tuna,
			      const void *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		priv->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops stmmac_ethtool_ops = {
	.begin = stmmac_check_if_running,
	.get_drvinfo = stmmac_ethtool_getdrvinfo,
	.get_msglevel = stmmac_ethtool_getmsglevel,
	.set_msglevel = stmmac_ethtool_setmsglevel,
	.get_regs = stmmac_ethtool_gregs,
	.get_regs_len = stmmac_ethtool_get_regs_len,
	.get_link = ethtool_op_get_link,
	.nway_reset = phy_ethtool_nway_reset,
	.get_pauseparam = stmmac_get_pauseparam,
	.set_pauseparam = stmmac_set_pauseparam,
	.get_ethtool_stats = stmmac_get_ethtool_stats,
	.get_strings = stmmac_get_strings,
	.get_wol = stmmac_get_wol,
	.set_wol = stmmac_set_wol,
	.get_eee = stmmac_ethtool_op_get_eee,
	.set_eee = stmmac_ethtool_op_set_eee,
	.get_sset_count = stmmac_get_sset_count,
	.get_ts_info = stmmac_get_ts_info,
	.get_coalesce = stmmac_get_coalesce,
	.set_coalesce = stmmac_set_coalesce,
	.get_tunable = stmmac_get_tunable,
	.set_tunable = stmmac_set_tunable,
	.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
	.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
};

void stmmac_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &stmmac_ethtool_ops;
}