/*******************************************************************************
  STMMAC Ethtool support

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/net_tstamp.h>
#include <asm/io.h>

#include "stmmac.h"
#include "dwmac_dma.h"

#define REG_SPACE_SIZE	0x1060
#define MAC100_ETHTOOL_NAME	"st_mac100"
#define GMAC_ETHTOOL_NAME	"st_gmac"

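/* Word offset inside the ethtool register dump at which the DMA channel
 * registers are copied (see stmmac_ethtool_gregs()), presumably kept at
 * this fixed position so existing register-dump layouts stay stable.
 */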
#define ETHTOOL_DMA_OFFSET	55

struct stmmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

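/* Extra statistics exposed via "ethtool -S": each entry records the stat
 * name, the size of the field in struct stmmac_extra_stats and its offset
 * inside struct stmmac_priv, from which stmmac_get_ethtool_stats() reads
 * the value.
 */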
#define STMMAC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
	offsetof(struct stmmac_priv, xstats.m)}

static const struct stmmac_stats stmmac_gstrings_stats[] = {
	/* Transmit errors */
	STMMAC_STAT(tx_underflow),
	STMMAC_STAT(tx_carrier),
	STMMAC_STAT(tx_losscarrier),
	STMMAC_STAT(vlan_tag),
	STMMAC_STAT(tx_deferred),
	STMMAC_STAT(tx_vlan),
	STMMAC_STAT(tx_jabber),
	STMMAC_STAT(tx_frame_flushed),
	STMMAC_STAT(tx_payload_error),
	STMMAC_STAT(tx_ip_header_error),
	/* Receive errors */
	STMMAC_STAT(rx_desc),
	STMMAC_STAT(sa_filter_fail),
	STMMAC_STAT(overflow_error),
	STMMAC_STAT(ipc_csum_error),
	STMMAC_STAT(rx_collision),
	STMMAC_STAT(rx_crc_errors),
	STMMAC_STAT(dribbling_bit),
	STMMAC_STAT(rx_length),
	STMMAC_STAT(rx_mii),
	STMMAC_STAT(rx_multicast),
	STMMAC_STAT(rx_gmac_overflow),
	STMMAC_STAT(rx_watchdog),
	STMMAC_STAT(da_rx_filter_fail),
	STMMAC_STAT(sa_rx_filter_fail),
	STMMAC_STAT(rx_missed_cntr),
	STMMAC_STAT(rx_overflow_cntr),
	STMMAC_STAT(rx_vlan),
	/* Tx/Rx IRQ error info */
	STMMAC_STAT(tx_undeflow_irq),
	STMMAC_STAT(tx_process_stopped_irq),
	STMMAC_STAT(tx_jabber_irq),
	STMMAC_STAT(rx_overflow_irq),
	STMMAC_STAT(rx_buf_unav_irq),
	STMMAC_STAT(rx_process_stopped_irq),
	STMMAC_STAT(rx_watchdog_irq),
	STMMAC_STAT(tx_early_irq),
	STMMAC_STAT(fatal_bus_error_irq),
	/* Tx/Rx IRQ Events */
	STMMAC_STAT(rx_early_irq),
	STMMAC_STAT(threshold),
	STMMAC_STAT(tx_pkt_n),
	STMMAC_STAT(rx_pkt_n),
	STMMAC_STAT(normal_irq_n),
	STMMAC_STAT(rx_normal_irq_n),
	STMMAC_STAT(napi_poll),
	STMMAC_STAT(tx_normal_irq_n),
	STMMAC_STAT(tx_clean),
	STMMAC_STAT(tx_set_ic_bit),
	STMMAC_STAT(irq_receive_pmt_irq_n),
	/* MMC info */
	STMMAC_STAT(mmc_tx_irq_n),
	STMMAC_STAT(mmc_rx_irq_n),
	STMMAC_STAT(mmc_rx_csum_offload_irq_n),
	/* EEE */
	STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
	STMMAC_STAT(phy_eee_wakeup_error_n),
	/* Extended RDES status */
	STMMAC_STAT(ip_hdr_err),
	STMMAC_STAT(ip_payload_err),
	STMMAC_STAT(ip_csum_bypassed),
	STMMAC_STAT(ipv4_pkt_rcvd),
	STMMAC_STAT(ipv6_pkt_rcvd),
	STMMAC_STAT(no_ptp_rx_msg_type_ext),
	STMMAC_STAT(ptp_rx_msg_type_sync),
	STMMAC_STAT(ptp_rx_msg_type_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_delay_req),
	STMMAC_STAT(ptp_rx_msg_type_delay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_announce),
	STMMAC_STAT(ptp_rx_msg_type_management),
	STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
	STMMAC_STAT(ptp_frame_type),
	STMMAC_STAT(ptp_ver),
	STMMAC_STAT(timestamp_dropped),
	STMMAC_STAT(av_pkt_rcvd),
	STMMAC_STAT(av_tagged_pkt_rcvd),
	STMMAC_STAT(vlan_tag_priority_val),
	STMMAC_STAT(l3_filter_match),
	STMMAC_STAT(l4_filter_match),
	STMMAC_STAT(l3_l4_filter_no_match),
	/* PCS */
	STMMAC_STAT(irq_pcs_ane_n),
	STMMAC_STAT(irq_pcs_link_n),
	STMMAC_STAT(irq_rgmii_n),
	/* DEBUG */
	STMMAC_STAT(mtl_tx_status_fifo_full),
	STMMAC_STAT(mtl_tx_fifo_not_empty),
	STMMAC_STAT(mmtl_fifo_ctrl),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_write),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_read),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle),
	STMMAC_STAT(mac_tx_in_pause),
	STMMAC_STAT(mac_tx_frame_ctrl_xfer),
	STMMAC_STAT(mac_tx_frame_ctrl_idle),
	STMMAC_STAT(mac_tx_frame_ctrl_wait),
	STMMAC_STAT(mac_tx_frame_ctrl_pause),
	STMMAC_STAT(mac_gmii_tx_proto_engine),
	STMMAC_STAT(mtl_rx_fifo_fill_level_full),
	STMMAC_STAT(mtl_rx_fifo_fill_above_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_below_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_level_empty),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_status),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle),
	STMMAC_STAT(mtl_rx_fifo_ctrl_active),
	STMMAC_STAT(mac_rx_frame_ctrl_fifo),
	STMMAC_STAT(mac_gmii_rx_proto_engine),
	/* TSO */
	STMMAC_STAT(tx_tso_frames),
	STMMAC_STAT(tx_tso_nfrags),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)

/* HW MAC Management counters (if supported) */
#define STMMAC_MMC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_counters, m),	\
	offsetof(struct stmmac_priv, mmc.m)}

static const struct stmmac_stats stmmac_mmc[] = {
	STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_tx_framecount_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_unicast_gb),
	STMMAC_MMC_STAT(mmc_tx_multicast_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
	STMMAC_MMC_STAT(mmc_tx_underflow_error),
	STMMAC_MMC_STAT(mmc_tx_singlecol_g),
	STMMAC_MMC_STAT(mmc_tx_multicol_g),
	STMMAC_MMC_STAT(mmc_tx_deferred),
	STMMAC_MMC_STAT(mmc_tx_latecol),
	STMMAC_MMC_STAT(mmc_tx_exesscol),
	STMMAC_MMC_STAT(mmc_tx_carrier_error),
	STMMAC_MMC_STAT(mmc_tx_octetcount_g),
	STMMAC_MMC_STAT(mmc_tx_framecount_g),
	STMMAC_MMC_STAT(mmc_tx_excessdef),
	STMMAC_MMC_STAT(mmc_tx_pause_frame),
	STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
	STMMAC_MMC_STAT(mmc_rx_framecount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_g),
	STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_rx_crc_error),
	STMMAC_MMC_STAT(mmc_rx_align_error),
	STMMAC_MMC_STAT(mmc_rx_run_error),
	STMMAC_MMC_STAT(mmc_rx_jabber_error),
	STMMAC_MMC_STAT(mmc_rx_undersize_g),
	STMMAC_MMC_STAT(mmc_rx_oversize_g),
	STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_unicast_g),
	STMMAC_MMC_STAT(mmc_rx_length_error),
	STMMAC_MMC_STAT(mmc_rx_autofrangetype),
	STMMAC_MMC_STAT(mmc_rx_pause_frames),
	STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
	STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
	STMMAC_MMC_STAT(mmc_rx_watchdog_error),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
	STMMAC_MMC_STAT(mmc_rx_udp_gd),
	STMMAC_MMC_STAT(mmc_rx_udp_err),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd),
	STMMAC_MMC_STAT(mmc_rx_tcp_err),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd),
	STMMAC_MMC_STAT(mmc_rx_icmp_err),
	STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)

static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *info)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->has_gmac || priv->plat->has_gmac4)
		strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
	else
		strlcpy(info->driver, MAC100_ETHTOOL_NAME,
			sizeof(info->driver));

	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

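/* When the MAC uses its internal PCS (RGMII/SGMII), the link state and the
 * auto-negotiation advertisements are read back from the hardware AN
 * registers; otherwise the query is delegated to the attached PHY.
 */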
static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
					     struct ethtool_link_ksettings *cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phy = dev->phydev;

	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
	    priv->hw->pcs & STMMAC_PCS_SGMII) {
		struct rgmii_adv adv;
		u32 supported, advertising, lp_advertising;

		if (!priv->xstats.pcs_link) {
			cmd->base.speed = SPEED_UNKNOWN;
			cmd->base.duplex = DUPLEX_UNKNOWN;
			return 0;
		}
		cmd->base.duplex = priv->xstats.pcs_duplex;

		cmd->base.speed = priv->xstats.pcs_speed;

		/* Get and convert ADV/LP_ADV from the HW AN registers */
		if (!priv->hw->mac->pcs_get_adv_lp)
			return -EOPNOTSUPP;	/* should never happen */

		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);

		/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */

		ethtool_convert_link_mode_to_legacy_u32(
			&supported, cmd->link_modes.supported);
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);
		ethtool_convert_link_mode_to_legacy_u32(
			&lp_advertising, cmd->link_modes.lp_advertising);

		if (adv.pause & STMMAC_PCS_PAUSE)
			advertising |= ADVERTISED_Pause;
		if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
			advertising |= ADVERTISED_Asym_Pause;
		if (adv.lp_pause & STMMAC_PCS_PAUSE)
			lp_advertising |= ADVERTISED_Pause;
		if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
			lp_advertising |= ADVERTISED_Asym_Pause;

		/* Reg49[3] always set because ANE is always supported */
		cmd->base.autoneg = ADVERTISED_Autoneg;
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		lp_advertising |= ADVERTISED_Autoneg;

		if (adv.duplex) {
			supported |= (SUPPORTED_1000baseT_Full |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_10baseT_Full);
			advertising |= (ADVERTISED_1000baseT_Full |
					ADVERTISED_100baseT_Full |
					ADVERTISED_10baseT_Full);
		} else {
			supported |= (SUPPORTED_1000baseT_Half |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_10baseT_Half);
			advertising |= (ADVERTISED_1000baseT_Half |
					ADVERTISED_100baseT_Half |
					ADVERTISED_10baseT_Half);
		}
		if (adv.lp_duplex)
			lp_advertising |= (ADVERTISED_1000baseT_Full |
					   ADVERTISED_100baseT_Full |
					   ADVERTISED_10baseT_Full);
		else
			lp_advertising |= (ADVERTISED_1000baseT_Half |
					   ADVERTISED_100baseT_Half |
					   ADVERTISED_10baseT_Half);
		cmd->base.port = PORT_OTHER;

		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising, lp_advertising);

		return 0;
	}

	if (phy == NULL) {
		pr_err("%s: %s: PHY is not registered\n",
		       __func__, dev->name);
		return -ENODEV;
	}
	if (!netif_running(dev)) {
		pr_err("%s: interface is disabled: we cannot track "
		"link speed / duplex setting\n", dev->name);
		return -EBUSY;
	}
	phy_ethtool_ksettings_get(phy, cmd);
	return 0;
}

static int
stmmac_ethtool_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phy = dev->phydev;
	int rc;

	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
	    priv->hw->pcs & STMMAC_PCS_SGMII) {
		u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;

		/* Only support ANE */
		if (cmd->base.autoneg != AUTONEG_ENABLE)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			ADVERTISED_1000baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full |
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full);

		spin_lock(&priv->lock);

		if (priv->hw->mac->pcs_ctrl_ane)
			priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
						    priv->hw->ps, 0);

		spin_unlock(&priv->lock);

		return 0;
	}

	rc = phy_ethtool_ksettings_set(phy, cmd);

	return rc;
}

static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	return priv->msg_enable;
}

static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	priv->msg_enable = level;
}

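/* ethtool_ops::begin hook: every ethtool request is rejected with -EBUSY
 * while the interface is down.
 */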
static int stmmac_check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EBUSY;
	return 0;
}

static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}

static void stmmac_ethtool_gregs(struct net_device *dev,
			  struct ethtool_regs *regs, void *space)
{
	u32 *reg_space = (u32 *) space;

	struct stmmac_priv *priv = netdev_priv(dev);

	memset(reg_space, 0x0, REG_SPACE_SIZE);

	priv->hw->mac->dump_regs(priv->hw, reg_space);
	priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
	/* Copy DMA registers to where ethtool expects them */
	memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
	       NUM_DWMAC1000_DMA_REGS * 4);
}

static void
stmmac_get_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	pause->rx_pause = 0;
	pause->tx_pause = 0;

	if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
		struct rgmii_adv adv_lp;

		pause->autoneg = 1;
		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
		if (!adv_lp.pause)
			return;
	} else {
		if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
		    !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
			return;
	}

	pause->autoneg = netdev->phydev->autoneg;

	if (priv->flow_ctrl & FLOW_RX)
		pause->rx_pause = 1;
	if (priv->flow_ctrl & FLOW_TX)
		pause->tx_pause = 1;
}

static int
stmmac_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	struct phy_device *phy = netdev->phydev;
	int new_pause = FLOW_OFF;

	if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
		struct rgmii_adv adv_lp;

		pause->autoneg = 1;
		priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
		if (!adv_lp.pause)
			return -EOPNOTSUPP;
	} else {
		if (!(phy->supported & SUPPORTED_Pause) ||
		    !(phy->supported & SUPPORTED_Asym_Pause))
			return -EOPNOTSUPP;
	}

	if (pause->rx_pause)
		new_pause |= FLOW_RX;
	if (pause->tx_pause)
		new_pause |= FLOW_TX;

	priv->flow_ctrl = new_pause;
	phy->autoneg = pause->autoneg;

	if (phy->autoneg) {
		if (netif_running(netdev))
			return phy_start_aneg(phy);
	}

	priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
	return 0;
}

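/* Fill the statistics buffer in the same order used by stmmac_get_strings():
 * optional safety-feature counters first, then the MMC counters when RMON is
 * available, and finally the software extra stats (xstats).
 */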
static void stmmac_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *dummy, u64 *data)
{
	const char *(*dump)(struct stmmac_safety_stats *stats, int index,
			unsigned long *count);
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	unsigned long count;
	int i, j = 0;

	if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
		dump = priv->hw->mac->safety_feat_dump;

		for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
			if (dump(&priv->sstats, i, &count))
				data[j++] = count;
		}
	}

	/* Update the DMA HW counters for dwmac10/100 */
	if (priv->hw->dma->dma_diagnostic_fr)
		priv->hw->dma->dma_diagnostic_fr(&dev->stats,
						 (void *) &priv->xstats,
						 priv->ioaddr);
	else {
		/* If supported, expose the MMC counters of the newer GMAC chips */
		if (priv->dma_cap.rmon) {
			dwmac_mmc_read(priv->mmcaddr, &priv->mmc);

			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				char *p;
				p = (char *)priv + stmmac_mmc[i].stat_offset;

				data[j++] = (stmmac_mmc[i].sizeof_stat ==
					     sizeof(u64)) ? (*(u64 *)p) :
					     (*(u32 *)p);
			}
		}
		if (priv->eee_enabled) {
			int val = phy_get_eee_err(dev->phydev);
			if (val)
				priv->xstats.phy_eee_wakeup_error_n = val;
		}

		if ((priv->hw->mac->debug) &&
		    (priv->synopsys_id >= DWMAC_CORE_3_50))
			priv->hw->mac->debug(priv->ioaddr,
					     (void *)&priv->xstats,
					     rx_queues_count, tx_queues_count);
	}
	for (i = 0; i < STMMAC_STATS_LEN; i++) {
		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
		data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
			     sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
	}
}

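/* The count returned here must match the number of strings emitted by
 * stmmac_get_strings() and the number of values written by
 * stmmac_get_ethtool_stats().
 */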
static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	const char *(*dump)(struct stmmac_safety_stats *stats, int index,
			unsigned long *count);
	int i, len, safety_len = 0;

	switch (sset) {
	case ETH_SS_STATS:
		len = STMMAC_STATS_LEN;

		if (priv->dma_cap.rmon)
			len += STMMAC_MMC_STATS_LEN;
		if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
			dump = priv->hw->mac->safety_feat_dump;

			for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
				if (dump(&priv->sstats, i, NULL))
					safety_len++;
			}

			len += safety_len;
		}

		return len;
	default:
		return -EOPNOTSUPP;
	}
}

static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;
	u8 *p = data;
	struct stmmac_priv *priv = netdev_priv(dev);
	const char *(*dump)(struct stmmac_safety_stats *stats, int index,
			unsigned long *count);

	switch (stringset) {
	case ETH_SS_STATS:
		if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
			dump = priv->hw->mac->safety_feat_dump;
			for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
				const char *desc = dump(&priv->sstats, i, NULL);

				if (desc) {
					memcpy(p, desc, ETH_GSTRING_LEN);
					p += ETH_GSTRING_LEN;
				}
			}
		}
		if (priv->dma_cap.rmon)
			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				memcpy(p, stmmac_mmc[i].stat_string,
				       ETH_GSTRING_LEN);
				p += ETH_GSTRING_LEN;
			}
		for (i = 0; i < STMMAC_STATS_LEN; i++) {
			memcpy(p, stmmac_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Wake-on-LAN is supported through magic packet and unicast frames. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->lock);
	if (device_can_wakeup(priv->device)) {
		wol->supported = WAKE_MAGIC | WAKE_UCAST;
		wol->wolopts = priv->wolopts;
	}
	spin_unlock_irq(&priv->lock);
}

static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 support = WAKE_MAGIC | WAKE_UCAST;

	/* Almost all GMAC devices support WoL via magic frame by default,
	 * but it is masked out here when the HW capability register shows
	 * no support for pmt_magic_frame. */
	if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
		wol->wolopts &= ~WAKE_MAGIC;

	if (!device_can_wakeup(priv->device))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	if (wol->wolopts) {
		pr_info("stmmac: wakeup enable\n");
		device_set_wakeup_enable(priv->device, 1);
		enable_irq_wake(priv->wol_irq);
	} else {
		device_set_wakeup_enable(priv->device, 0);
		disable_irq_wake(priv->wol_irq);
	}

	spin_lock_irq(&priv->lock);
	priv->wolopts = wol->wolopts;
	spin_unlock_irq(&priv->lock);

	return 0;
}

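/* Energy Efficient Ethernet: only exposed when the HW capability register
 * reports EEE support; the driver's cached state is combined with whatever
 * the PHY layer reports.
 */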
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->dma_cap.eee)
		return -EOPNOTSUPP;

	edata->eee_enabled = priv->eee_enabled;
	edata->eee_active = priv->eee_active;
	edata->tx_lpi_timer = priv->tx_lpi_timer;

	return phy_ethtool_get_eee(dev->phydev, edata);
}

static int stmmac_ethtool_op_set_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->eee_enabled = edata->eee_enabled;

	if (!priv->eee_enabled)
		stmmac_disable_eee_mode(priv);
	else {
		/* EEE has been requested: re-run eee_init() to verify that
		 * it can actually be enabled. If initialization fails, EEE
		 * stays disabled and an error is returned.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
		if (!priv->eee_enabled)
			return -EOPNOTSUPP;

		/* Do not change tx_lpi_timer in case of failure */
		priv->tx_lpi_timer = edata->tx_lpi_timer;
	}

	return phy_ethtool_set_eee(dev->phydev, edata);
}

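/* Convert microseconds to RX interrupt watchdog (RIWT) units and back.
 * One RIWT unit corresponds to 256 cycles of the CSR clock. For example,
 * assuming a hypothetical 250 MHz CSR clock, 1000 us maps to
 * (1000 * 250) / 256 = 976 RIWT units (integer division).
 */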
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);

	if (!clk)
		return 0;

	return (usec * (clk / 1000000)) / 256;
}

static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);

	if (!clk)
		return 0;

	return (riwt * 256) / (clk / 1000000);
}

static int stmmac_get_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	ec->tx_coalesce_usecs = priv->tx_coal_timer;
	ec->tx_max_coalesced_frames = priv->tx_coal_frames;

	if (priv->use_riwt)
		ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);

	return 0;
}

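/* In this driver the TX coalesce settings are handled in software (a timer
 * plus selective setting of the interrupt-on-completion bit), while the RX
 * value programs the DMA RX interrupt watchdog (RIWT) in hardware.
 */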
static int stmmac_set_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int rx_riwt;

	/* Reject parameters that are not supported */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if (ec->rx_coalesce_usecs == 0)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
	    (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
		return -EINVAL;

	rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);

	if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
		return -EINVAL;
	else if (!priv->use_riwt)
		return -EOPNOTSUPP;

	/* Only copy relevant parameters, ignore all others. */
	priv->tx_coal_frames = ec->tx_max_coalesced_frames;
	priv->tx_coal_timer = ec->tx_coalesce_usecs;
	priv->rx_riwt = rx_riwt;
	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);

	return 0;
}

static int stmmac_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;

		if (priv->ptp_clock)
			info->phc_index = ptp_clock_index(priv->ptp_clock);

		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

		info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_ALL));
		return 0;
	} else
		return ethtool_op_get_ts_info(dev, info);
}

static int stmmac_get_tunable(struct net_device *dev,
			      const struct ethtool_tunable *tuna, void *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = priv->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int stmmac_set_tunable(struct net_device *dev,
			      const struct ethtool_tunable *tuna,
			      const void *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		priv->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops stmmac_ethtool_ops = {
	.begin = stmmac_check_if_running,
	.get_drvinfo = stmmac_ethtool_getdrvinfo,
	.get_msglevel = stmmac_ethtool_getmsglevel,
	.set_msglevel = stmmac_ethtool_setmsglevel,
	.get_regs = stmmac_ethtool_gregs,
	.get_regs_len = stmmac_ethtool_get_regs_len,
	.get_link = ethtool_op_get_link,
	.nway_reset = phy_ethtool_nway_reset,
	.get_pauseparam = stmmac_get_pauseparam,
	.set_pauseparam = stmmac_set_pauseparam,
	.get_ethtool_stats = stmmac_get_ethtool_stats,
	.get_strings = stmmac_get_strings,
	.get_wol = stmmac_get_wol,
	.set_wol = stmmac_set_wol,
	.get_eee = stmmac_ethtool_op_get_eee,
	.set_eee = stmmac_ethtool_op_set_eee,
	.get_sset_count	= stmmac_get_sset_count,
	.get_ts_info = stmmac_get_ts_info,
	.get_coalesce = stmmac_get_coalesce,
	.set_coalesce = stmmac_set_coalesce,
	.get_tunable = stmmac_get_tunable,
	.set_tunable = stmmac_set_tunable,
	.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
	.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
};

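/* Attach the ethtool operations to the net_device; called by the driver
 * core while setting up the interface.
 */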
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &stmmac_ethtool_ops;
}