// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/netdevice.h>
#include <linux/nvme.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include "funeth.h"
#include "fun_port.h"
#include "funeth_txrx.h"

/* Min queue depth. The smallest power-of-2 supporting jumbo frames with 4K
 * pages is 8. Require it for all types of queues though some could work with
 * fewer entries.
 */
#define FUNETH_MIN_QDEPTH 8

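/* Ethtool stat string tables. The MAC string tables below must stay in the
 * same order as the counters copied out by get_mac_stats(), and the per-queue
 * tables likewise mirror the order used in fun_get_ethtool_stats().
 */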
static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = {
	"mac_tx_octets_total",
	"mac_tx_frames_total",
	"mac_tx_vlan_frames_ok",
	"mac_tx_unicast_frames",
	"mac_tx_multicast_frames",
	"mac_tx_broadcast_frames",
	"mac_tx_errors",
	"mac_tx_CBFCPAUSE0",
	"mac_tx_CBFCPAUSE1",
	"mac_tx_CBFCPAUSE2",
	"mac_tx_CBFCPAUSE3",
	"mac_tx_CBFCPAUSE4",
	"mac_tx_CBFCPAUSE5",
	"mac_tx_CBFCPAUSE6",
	"mac_tx_CBFCPAUSE7",
	"mac_tx_CBFCPAUSE8",
	"mac_tx_CBFCPAUSE9",
	"mac_tx_CBFCPAUSE10",
	"mac_tx_CBFCPAUSE11",
	"mac_tx_CBFCPAUSE12",
	"mac_tx_CBFCPAUSE13",
	"mac_tx_CBFCPAUSE14",
	"mac_tx_CBFCPAUSE15",
};

static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = {
	"mac_rx_octets_total",
	"mac_rx_frames_total",
	"mac_rx_VLAN_frames_ok",
	"mac_rx_unicast_frames",
	"mac_rx_multicast_frames",
	"mac_rx_broadcast_frames",
	"mac_rx_drop_events",
	"mac_rx_errors",
	"mac_rx_alignment_errors",
	"mac_rx_CBFCPAUSE0",
	"mac_rx_CBFCPAUSE1",
	"mac_rx_CBFCPAUSE2",
	"mac_rx_CBFCPAUSE3",
	"mac_rx_CBFCPAUSE4",
	"mac_rx_CBFCPAUSE5",
	"mac_rx_CBFCPAUSE6",
	"mac_rx_CBFCPAUSE7",
	"mac_rx_CBFCPAUSE8",
	"mac_rx_CBFCPAUSE9",
	"mac_rx_CBFCPAUSE10",
	"mac_rx_CBFCPAUSE11",
	"mac_rx_CBFCPAUSE12",
	"mac_rx_CBFCPAUSE13",
	"mac_rx_CBFCPAUSE14",
	"mac_rx_CBFCPAUSE15",
};

static const char * const txq_stat_names[] = {
	"tx_pkts",
	"tx_bytes",
	"tx_cso",
	"tx_tso",
	"tx_encapsulated_tso",
	"tx_more",
	"tx_queue_stops",
	"tx_queue_restarts",
	"tx_mapping_errors",
	"tx_tls_encrypted_packets",
	"tx_tls_encrypted_bytes",
	"tx_tls_ooo",
	"tx_tls_drop_no_sync_data",
};

static const char * const xdpq_stat_names[] = {
	"tx_xdp_pkts",
	"tx_xdp_bytes",
	"tx_xdp_full",
	"tx_xdp_mapping_errors",
};

static const char * const rxq_stat_names[] = {
	"rx_pkts",
	"rx_bytes",
	"rx_cso",
	"gro_pkts",
	"gro_merged",
	"rx_xdp_tx",
	"rx_xdp_redir",
	"rx_xdp_drops",
	"rx_buffers",
	"rx_page_allocs",
	"rx_drops",
	"rx_budget_exhausted",
	"rx_mapping_errors",
};

static const char * const tls_stat_names[] = {
	"tx_tls_ctx",
	"tx_tls_del",
	"tx_tls_resync",
};

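/* Translate a device FUN_PORT_CAP_* link-mode bitmap into the corresponding
 * ethtool link mode bits in @ethtool_modes_map.
 */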
static void fun_link_modes_to_ethtool(u64 modes,
				      unsigned long *ethtool_modes_map)
{
#define ADD_LINK_MODE(mode) \
	__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map)

	if (modes & FUN_PORT_CAP_AUTONEG)
		ADD_LINK_MODE(Autoneg);
	if (modes & FUN_PORT_CAP_1000_X)
		ADD_LINK_MODE(1000baseX_Full);
	if (modes & FUN_PORT_CAP_10G_R) {
		ADD_LINK_MODE(10000baseCR_Full);
		ADD_LINK_MODE(10000baseSR_Full);
		ADD_LINK_MODE(10000baseLR_Full);
		ADD_LINK_MODE(10000baseER_Full);
	}
	if (modes & FUN_PORT_CAP_25G_R) {
		ADD_LINK_MODE(25000baseCR_Full);
		ADD_LINK_MODE(25000baseSR_Full);
	}
	if (modes & FUN_PORT_CAP_40G_R4) {
		ADD_LINK_MODE(40000baseCR4_Full);
		ADD_LINK_MODE(40000baseSR4_Full);
		ADD_LINK_MODE(40000baseLR4_Full);
	}
	if (modes & FUN_PORT_CAP_50G_R2) {
		ADD_LINK_MODE(50000baseCR2_Full);
		ADD_LINK_MODE(50000baseSR2_Full);
	}
	if (modes & FUN_PORT_CAP_50G_R) {
		ADD_LINK_MODE(50000baseCR_Full);
		ADD_LINK_MODE(50000baseSR_Full);
		ADD_LINK_MODE(50000baseLR_ER_FR_Full);
	}
	if (modes & FUN_PORT_CAP_100G_R4) {
		ADD_LINK_MODE(100000baseCR4_Full);
		ADD_LINK_MODE(100000baseSR4_Full);
		ADD_LINK_MODE(100000baseLR4_ER4_Full);
	}
	if (modes & FUN_PORT_CAP_100G_R2) {
		ADD_LINK_MODE(100000baseCR2_Full);
		ADD_LINK_MODE(100000baseSR2_Full);
		ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full);
	}
	if (modes & FUN_PORT_CAP_FEC_NONE)
		ADD_LINK_MODE(FEC_NONE);
	if (modes & FUN_PORT_CAP_FEC_FC)
		ADD_LINK_MODE(FEC_BASER);
	if (modes & FUN_PORT_CAP_FEC_RS)
		ADD_LINK_MODE(FEC_RS);
	if (modes & FUN_PORT_CAP_RX_PAUSE)
		ADD_LINK_MODE(Pause);

#undef ADD_LINK_MODE
}

static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks)
{
	bool rx_pause, tx_pause;

	rx_pause = advertising & FUN_PORT_CAP_RX_PAUSE;
	tx_pause = advertising & FUN_PORT_CAP_TX_PAUSE;
	if (tx_pause ^ rx_pause)
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
}

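/* Map the device's transceiver type to an ethtool PORT_* connector type.
 * Anything that is neither BASE-T nor copper DAC is reported as fibre.
 */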
static unsigned int fun_port_type(unsigned int xcvr)
{
	if (!xcvr)
		return PORT_NONE;

	switch (xcvr & 7) {
	case FUN_XCVR_BASET:
		return PORT_TP;
	case FUN_XCVR_CU:
		return PORT_DA;
	default:
		return PORT_FIBRE;
	}
}

static int fun_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ks)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int seq, speed, xcvr;
	u64 lp_advertising;
	bool link_up;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);
	ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);

	/* Link settings change asynchronously, take a consistent snapshot */
	do {
		seq = read_seqcount_begin(&fp->link_seq);
		link_up = netif_carrier_ok(netdev);
		speed = fp->link_speed;
		xcvr = fp->xcvr_type;
		lp_advertising = fp->lp_advertising;
	} while (read_seqcount_retry(&fp->link_seq, seq));

	if (link_up) {
		ks->base.speed = speed;
		ks->base.duplex = DUPLEX_FULL;
		fun_link_modes_to_ethtool(lp_advertising,
					  ks->link_modes.lp_advertising);
	} else {
		ks->base.speed = SPEED_UNKNOWN;
		ks->base.duplex = DUPLEX_UNKNOWN;
	}

	ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ?
			   AUTONEG_ENABLE : AUTONEG_DISABLE;
	ks->base.port = fun_port_type(xcvr);

	fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported);
	if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE))
		ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);

	fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising);
	set_asym_pause(fp->advertising, ks);
	return 0;
}

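/* Reverse of fun_link_modes_to_ethtool(): collapse the ethtool advertised
 * link modes back into a FUN_PORT_CAP_* bitmap.
 */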
static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks)
{
	u64 modes = 0;

#define HAS_MODE(mode) \
	ethtool_link_ksettings_test_link_mode(ks, advertising, mode)

	if (HAS_MODE(1000baseX_Full))
		modes |= FUN_PORT_CAP_1000_X;
	if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) ||
	    HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full))
		modes |= FUN_PORT_CAP_10G_R;
	if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full))
		modes |= FUN_PORT_CAP_25G_R;
	if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) ||
	    HAS_MODE(40000baseLR4_Full))
		modes |= FUN_PORT_CAP_40G_R4;
	if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full))
		modes |= FUN_PORT_CAP_50G_R2;
	if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) ||
	    HAS_MODE(50000baseLR_ER_FR_Full))
		modes |= FUN_PORT_CAP_50G_R;
	if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) ||
	    HAS_MODE(100000baseLR4_ER4_Full))
		modes |= FUN_PORT_CAP_100G_R4;
	if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) ||
	    HAS_MODE(100000baseLR2_ER2_FR2_Full))
		modes |= FUN_PORT_CAP_100G_R2;

	return modes;
#undef HAS_MODE
}

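/* For a forced speed, return all FUN_PORT_CAP_* link modes that operate at
 * that speed. The caller masks the result with the port's capabilities.
 */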
static u64 fun_speed_to_link_mode(unsigned int speed)
{
	switch (speed) {
	case SPEED_100000:
		return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2;
	case SPEED_50000:
		return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2;
	case SPEED_40000:
		return FUN_PORT_CAP_40G_R4;
	case SPEED_25000:
		return FUN_PORT_CAP_25G_R;
	case SPEED_10000:
		return FUN_PORT_CAP_10G_R;
	case SPEED_1000:
		return FUN_PORT_CAP_1000_X;
	default:
		return 0;
	}
}

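/* Write a new advertisement to the device and cache it locally if the
 * command succeeds.
 */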
static int fun_change_advert(struct funeth_priv *fp, u64 new_advert)
{
	int err;

	if (new_advert == fp->advertising)
		return 0;

	err = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, new_advert);
	if (!err)
		fp->advertising = new_advert;
	return err;
}

#define FUN_PORT_CAP_FEC_MASK \
	(FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS)

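/* ethtool set_link_ksettings handler. Only the speed/autoneg portion of the
 * advertisement is changed here; the current pause and FEC bits, which are
 * managed through set_pauseparam/set_fecparam, are carried over unchanged.
 */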
static int fun_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *ks)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 new_advert;

	/* eswitch ports don't support mode changes */
	if (fp->port_caps & FUN_PORT_CAP_VPORT)
		return -EOPNOTSUPP;

	if (ks->base.duplex == DUPLEX_HALF)
		return -EINVAL;
	if (ks->base.autoneg == AUTONEG_ENABLE &&
	    !(fp->port_caps & FUN_PORT_CAP_AUTONEG))
		return -EINVAL;

	if (ks->base.autoneg == AUTONEG_ENABLE) {
		if (linkmode_empty(ks->link_modes.advertising))
			return -EINVAL;

		fun_link_modes_to_ethtool(fp->port_caps, supported);
		if (!linkmode_subset(ks->link_modes.advertising, supported))
			return -EINVAL;

		new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG;
	} else {
		new_advert = fun_speed_to_link_mode(ks->base.speed);
		new_advert &= fp->port_caps;
		if (!new_advert)
			return -EINVAL;
	}
	new_advert |= fp->advertising &
		      (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK);

	return fun_change_advert(fp, new_advert);
}

static void fun_get_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	u8 active_pause = fp->active_fc;

	pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE);
	pause->tx_pause = !!(active_pause & FUN_PORT_CAP_TX_PAUSE);
	pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG);
}

static int fun_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 new_advert;

	if (fp->port_caps & FUN_PORT_CAP_VPORT)
		return -EOPNOTSUPP;
	/* Forcing PAUSE settings with AN enabled is unsupported. */
	if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG))
		return -EOPNOTSUPP;
	if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG))
		return -EINVAL;
	if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE))
		return -EINVAL;
	if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE))
		return -EINVAL;

	new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK;
	if (pause->tx_pause)
		new_advert |= FUN_PORT_CAP_TX_PAUSE;
	if (pause->rx_pause)
		new_advert |= FUN_PORT_CAP_RX_PAUSE;

	return fun_change_advert(fp, new_advert);
}

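/* ethtool nway_reset handler. Writing an advertisement consisting of only the
 * autoneg capability is, as the function name suggests, how the device is
 * asked to restart autonegotiation.
 */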
static int fun_restart_an(struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->advertising & FUN_PORT_CAP_AUTONEG))
		return -EOPNOTSUPP;

	return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT,
				  FUN_PORT_CAP_AUTONEG);
}

static int fun_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int beacon;

	if (fp->port_caps & FUN_PORT_CAP_VPORT)
		return -EOPNOTSUPP;
	if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE)
		return -EOPNOTSUPP;

	beacon = state == ETHTOOL_ID_ACTIVE ? FUN_PORT_LED_BEACON_ON :
					      FUN_PORT_LED_BEACON_OFF;
	return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon);
}

static void fun_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info));
}

static u32 fun_get_msglevel(struct net_device *netdev)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	return fp->msg_enable;
}

static void fun_set_msglevel(struct net_device *netdev, u32 value)
{
	struct funeth_priv *fp = netdev_priv(netdev);

	fp->msg_enable = value;
}

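/* The register dump covers the device's NVMe-style controller registers,
 * from CAP through ACQ.
 */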
static int fun_get_regs_len(struct net_device *dev)
{
	return NVME_REG_ACQ + sizeof(u64);
}

static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *buf)
{
	const struct funeth_priv *fp = netdev_priv(dev);
	void __iomem *bar = fp->fdev->bar;

	regs->version = 0;
	*(u64 *)(buf + NVME_REG_CAP)   = readq(bar + NVME_REG_CAP);
	*(u32 *)(buf + NVME_REG_VS)    = readl(bar + NVME_REG_VS);
	*(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS);
	*(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC);
	*(u32 *)(buf + NVME_REG_CC)    = readl(bar + NVME_REG_CC);
	*(u32 *)(buf + NVME_REG_CSTS)  = readl(bar + NVME_REG_CSTS);
	*(u32 *)(buf + NVME_REG_AQA)   = readl(bar + NVME_REG_AQA);
	*(u64 *)(buf + NVME_REG_ASQ)   = readq(bar + NVME_REG_ASQ);
	*(u64 *)(buf + NVME_REG_ACQ)   = readq(bar + NVME_REG_ACQ);
}

static int fun_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *coal,
			    struct kernel_ethtool_coalesce *kcoal,
			    struct netlink_ext_ack *ext_ack)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	coal->rx_coalesce_usecs        = fp->rx_coal_usec;
	coal->rx_max_coalesced_frames  = fp->rx_coal_count;
	coal->use_adaptive_rx_coalesce = !fp->cq_irq_db;
	coal->tx_coalesce_usecs        = fp->tx_coal_usec;
	coal->tx_max_coalesced_frames  = fp->tx_coal_count;
	return 0;
}

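/* Validate and apply interrupt coalescing settings. New doorbell values are
 * computed from the requested usec/frame limits and, if the queues are
 * instantiated, pushed to each live Rx and Tx queue.
 */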
static int fun_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *coal,
			    struct kernel_ethtool_coalesce *kcoal,
			    struct netlink_ext_ack *ext_ack)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct funeth_rxq **rxqs;
	unsigned int i, db_val;

	if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
	    coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
	    (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 ||
	    coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
	    coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
	    (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0)
		return -EINVAL;

	/* a timer is required if there's any coalescing */
	if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) ||
	    (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs))
		return -EINVAL;

	fp->rx_coal_usec  = coal->rx_coalesce_usecs;
	fp->rx_coal_count = coal->rx_max_coalesced_frames;
	fp->tx_coal_usec  = coal->tx_coalesce_usecs;
	fp->tx_coal_count = coal->tx_max_coalesced_frames;

	db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
	WRITE_ONCE(fp->cq_irq_db, db_val);

	rxqs = rtnl_dereference(fp->rxqs);
	if (!rxqs)
		return 0;

	for (i = 0; i < netdev->real_num_rx_queues; i++)
		WRITE_ONCE(rxqs[i]->irq_db_val, db_val);

	db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count);
	for (i = 0; i < netdev->real_num_tx_queues; i++)
		WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val);

	return 0;
}

static void fun_get_channels(struct net_device *netdev,
			     struct ethtool_channels *chan)
{
	chan->max_rx   = netdev->num_rx_queues;
	chan->rx_count = netdev->real_num_rx_queues;

	chan->max_tx   = netdev->num_tx_queues;
	chan->tx_count = netdev->real_num_tx_queues;
}

static int fun_set_channels(struct net_device *netdev,
			    struct ethtool_channels *chan)
{
	if (!chan->tx_count || !chan->rx_count)
		return -EINVAL;

	if (chan->tx_count == netdev->real_num_tx_queues &&
	    chan->rx_count == netdev->real_num_rx_queues)
		return 0;

	if (netif_running(netdev))
		return fun_change_num_queues(netdev, chan->tx_count,
					     chan->rx_count);

	fun_set_ring_count(netdev, chan->tx_count, chan->rx_count);
	return 0;
}

static void fun_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *extack)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int max_depth = fp->fdev->q_depth;

	/* We size CQs to be twice the RQ depth so max RQ depth is half the
	 * max queue depth.
	 */
	ring->rx_max_pending = max_depth / 2;
	ring->tx_max_pending = max_depth;

	ring->rx_pending = fp->rq_depth;
	ring->tx_pending = fp->sq_depth;

	kring->rx_buf_len = PAGE_SIZE;
	kring->cqe_size = FUNETH_CQE_SIZE;
}

static int fun_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kring,
			     struct netlink_ext_ack *extack)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	int rc;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* queue depths must be powers-of-2 */
	if (!is_power_of_2(ring->rx_pending) ||
	    !is_power_of_2(ring->tx_pending))
		return -EINVAL;

	if (ring->rx_pending < FUNETH_MIN_QDEPTH ||
	    ring->tx_pending < FUNETH_MIN_QDEPTH)
		return -EINVAL;

	if (fp->sq_depth == ring->tx_pending &&
	    fp->rq_depth == ring->rx_pending)
		return 0;

	if (netif_running(netdev)) {
		struct fun_qset req = {
			.cq_depth = 2 * ring->rx_pending,
			.rq_depth = ring->rx_pending,
			.sq_depth = ring->tx_pending
		};

		rc = fun_replace_queues(netdev, &req, extack);
		if (rc)
			return rc;
	}

	fp->sq_depth = ring->tx_pending;
	fp->rq_depth = ring->rx_pending;
	fp->cq_depth = 2 * fp->rq_depth;
	return 0;
}

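/* Layout of the ETH_SS_STATS string set: optional MAC counters (if the port
 * supports them), then per-queue Tx, XDP Tx, and Rx counters, each group
 * followed by a totals row, and finally the TLS counters.
 */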
static int fun_get_sset_count(struct net_device *dev, int sset)
{
	const struct funeth_priv *fp = netdev_priv(dev);
	int n;

	switch (sset) {
	case ETH_SS_STATS:
		n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) +
		    (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) +
		    (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) +
		    ARRAY_SIZE(tls_stat_names);
		if (fp->port_caps & FUN_PORT_CAP_STATS) {
			n += ARRAY_SIZE(mac_tx_stat_names) +
			     ARRAY_SIZE(mac_rx_stat_names);
		}
		return n;
	default:
		break;
	}
	return 0;
}

static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int i, j;
	u8 *p = data;

	switch (sset) {
	case ETH_SS_STATS:
		if (fp->port_caps & FUN_PORT_CAP_STATS) {
			memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names));
			p += sizeof(mac_tx_stat_names);
			memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names));
			p += sizeof(mac_rx_stat_names);
		}

		for (i = 0; i < netdev->real_num_tx_queues; i++) {
			for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j],
						i);
		}
		for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
			ethtool_sprintf(&p, "%s", txq_stat_names[j]);

		for (i = 0; i < fp->num_xdpqs; i++) {
			for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]",
						xdpq_stat_names[j], i);
		}
		for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
			ethtool_sprintf(&p, "%s", xdpq_stat_names[j]);

		for (i = 0; i < netdev->real_num_rx_queues; i++) {
			for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j],
						i);
		}
		for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
			ethtool_sprintf(&p, "%s", rxq_stat_names[j]);

		for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
			ethtool_sprintf(&p, "%s", tls_stat_names[j]);
		break;
	default:
		break;
	}
}

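/* Copy out the MAC counters. fp->stats holds the Rx counter block first,
 * followed by the Tx block at offset PORT_MAC_RX_STATS_MAX (and the FEC block
 * after that), which is what the index calculations here and in the
 * RX_STAT/TX_STAT/FEC_STAT macros further down rely on.
 */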
static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data)
{
#define TX_STAT(s) \
	*data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])

	TX_STAT(etherStatsOctets);
	TX_STAT(etherStatsPkts);
	TX_STAT(VLANTransmittedOK);
	TX_STAT(ifOutUcastPkts);
	TX_STAT(ifOutMulticastPkts);
	TX_STAT(ifOutBroadcastPkts);
	TX_STAT(ifOutErrors);
	TX_STAT(CBFCPAUSEFramesTransmitted_0);
	TX_STAT(CBFCPAUSEFramesTransmitted_1);
	TX_STAT(CBFCPAUSEFramesTransmitted_2);
	TX_STAT(CBFCPAUSEFramesTransmitted_3);
	TX_STAT(CBFCPAUSEFramesTransmitted_4);
	TX_STAT(CBFCPAUSEFramesTransmitted_5);
	TX_STAT(CBFCPAUSEFramesTransmitted_6);
	TX_STAT(CBFCPAUSEFramesTransmitted_7);
	TX_STAT(CBFCPAUSEFramesTransmitted_8);
	TX_STAT(CBFCPAUSEFramesTransmitted_9);
	TX_STAT(CBFCPAUSEFramesTransmitted_10);
	TX_STAT(CBFCPAUSEFramesTransmitted_11);
	TX_STAT(CBFCPAUSEFramesTransmitted_12);
	TX_STAT(CBFCPAUSEFramesTransmitted_13);
	TX_STAT(CBFCPAUSEFramesTransmitted_14);
	TX_STAT(CBFCPAUSEFramesTransmitted_15);

#define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s])

	RX_STAT(etherStatsOctets);
	RX_STAT(etherStatsPkts);
	RX_STAT(VLANReceivedOK);
	RX_STAT(ifInUcastPkts);
	RX_STAT(ifInMulticastPkts);
	RX_STAT(ifInBroadcastPkts);
	RX_STAT(etherStatsDropEvents);
	RX_STAT(ifInErrors);
	RX_STAT(aAlignmentErrors);
	RX_STAT(CBFCPAUSEFramesReceived_0);
	RX_STAT(CBFCPAUSEFramesReceived_1);
	RX_STAT(CBFCPAUSEFramesReceived_2);
	RX_STAT(CBFCPAUSEFramesReceived_3);
	RX_STAT(CBFCPAUSEFramesReceived_4);
	RX_STAT(CBFCPAUSEFramesReceived_5);
	RX_STAT(CBFCPAUSEFramesReceived_6);
	RX_STAT(CBFCPAUSEFramesReceived_7);
	RX_STAT(CBFCPAUSEFramesReceived_8);
	RX_STAT(CBFCPAUSEFramesReceived_9);
	RX_STAT(CBFCPAUSEFramesReceived_10);
	RX_STAT(CBFCPAUSEFramesReceived_11);
	RX_STAT(CBFCPAUSEFramesReceived_12);
	RX_STAT(CBFCPAUSEFramesReceived_13);
	RX_STAT(CBFCPAUSEFramesReceived_14);
	RX_STAT(CBFCPAUSEFramesReceived_15);

	return data;

#undef TX_STAT
#undef RX_STAT
}

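/* Gather all ethtool stats. Per-queue counters are written in queue order
 * while ADD_STAT() accumulates each counter into the totals row that follows
 * the per-queue entries.
 */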
static void fun_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	struct funeth_txq_stats txs;
	struct funeth_rxq_stats rxs;
	struct funeth_txq **xdpqs;
	struct funeth_rxq **rxqs;
	unsigned int i, start;
	u64 *totals, *tot;

	if (fp->port_caps & FUN_PORT_CAP_STATS)
		data = get_mac_stats(fp, data);

	rxqs = rtnl_dereference(fp->rxqs);
	if (!rxqs)
		return;

#define ADD_STAT(cnt) do { \
	*data = (cnt); *tot++ += *data++; \
} while (0)

	/* Tx queues */
	totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names);

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		tot = totals;

		FUN_QSTAT_READ(fp->txqs[i], start, txs);

		ADD_STAT(txs.tx_pkts);
		ADD_STAT(txs.tx_bytes);
		ADD_STAT(txs.tx_cso);
		ADD_STAT(txs.tx_tso);
		ADD_STAT(txs.tx_encap_tso);
		ADD_STAT(txs.tx_more);
		ADD_STAT(txs.tx_nstops);
		ADD_STAT(txs.tx_nrestarts);
		ADD_STAT(txs.tx_map_err);
		ADD_STAT(txs.tx_tls_pkts);
		ADD_STAT(txs.tx_tls_bytes);
		ADD_STAT(txs.tx_tls_fallback);
		ADD_STAT(txs.tx_tls_drops);
	}
	data += ARRAY_SIZE(txq_stat_names);

	/* XDP Tx queues */
	xdpqs = rtnl_dereference(fp->xdpqs);
	totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names);

	for (i = 0; i < fp->num_xdpqs; i++) {
		tot = totals;

		FUN_QSTAT_READ(xdpqs[i], start, txs);

		ADD_STAT(txs.tx_pkts);
		ADD_STAT(txs.tx_bytes);
		ADD_STAT(txs.tx_xdp_full);
		ADD_STAT(txs.tx_map_err);
	}
	data += ARRAY_SIZE(xdpq_stat_names);

	/* Rx queues */
	totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names);

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		tot = totals;

		FUN_QSTAT_READ(rxqs[i], start, rxs);

		ADD_STAT(rxs.rx_pkts);
		ADD_STAT(rxs.rx_bytes);
		ADD_STAT(rxs.rx_cso);
		ADD_STAT(rxs.gro_pkts);
		ADD_STAT(rxs.gro_merged);
		ADD_STAT(rxs.xdp_tx);
		ADD_STAT(rxs.xdp_redir);
		ADD_STAT(rxs.xdp_drops);
		ADD_STAT(rxs.rx_bufs);
		ADD_STAT(rxs.rx_page_alloc);
		ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err);
		ADD_STAT(rxs.rx_budget);
		ADD_STAT(rxs.rx_map_err);
	}
	data += ARRAY_SIZE(rxq_stat_names);
#undef ADD_STAT

	*data++ = atomic64_read(&fp->tx_tls_add);
	*data++ = atomic64_read(&fp->tx_tls_del);
	*data++ = atomic64_read(&fp->tx_tls_resync);
}

#define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s])
#define TX_STAT(fp, s) \
	be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
#define FEC_STAT(fp, s) \
	be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \
				PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s])

static void fun_get_pause_stats(struct net_device *netdev,
				struct ethtool_pause_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted);
	stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived);
}

static void fun_get_802_3_stats(struct net_device *netdev,
				struct ethtool_eth_mac_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK);
	stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK);
	stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors);
	stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK);
	stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK);
	stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors);
	stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors);
}

static void fun_get_802_3_ctrl_stats(struct net_device *netdev,
				     struct ethtool_eth_ctrl_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted);
	stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived);
}

static void fun_get_rmon_stats(struct net_device *netdev,
			       struct ethtool_rmon_stats *stats,
			       const struct ethtool_rmon_hist_range **ranges)
{
	static const struct ethtool_rmon_hist_range rmon_ranges[] = {
		{   64,    64 },
		{   65,   127 },
		{  128,   255 },
		{  256,   511 },
		{  512,  1023 },
		{ 1024,  1518 },
		{ 1519, 32767 },
		{}
	};

	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts);
	stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts);
	stats->fragments = RX_STAT(fp, etherStatsFragments);
	stats->jabbers = RX_STAT(fp, etherStatsJabbers);

	stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets);
	stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets);
	stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets);
	stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets);
	stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets);
	stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets);
	stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets);

	stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets);
	stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets);
	stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets);
	stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets);
	stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets);
	stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets);
	stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets);

	*ranges = rmon_ranges;
}

static void fun_get_fec_stats(struct net_device *netdev,
			      struct ethtool_fec_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->corrected_blocks.total = FEC_STAT(fp, Correctable);
	stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable);
}

#undef RX_STAT
#undef TX_STAT
#undef FEC_STAT

static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = netdev->real_num_rx_queues;
		return 0;
	default:
		break;
	}
	return -EOPNOTSUPP;
}

static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	return 0;
}

static u32 fun_get_rxfh_indir_size(struct net_device *netdev)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	return fp->indir_table_nentries;
}

static u32 fun_get_rxfh_key_size(struct net_device *netdev)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	return sizeof(fp->rss_key);
}

static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!fp->rss_cfg)
		return -EOPNOTSUPP;

	if (indir)
		memcpy(indir, fp->indir_table,
		       sizeof(u32) * fp->indir_table_nentries);

	if (key)
		memcpy(key, fp->rss_key, sizeof(fp->rss_key));

	if (hfunc)
		*hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
				ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;

	return 0;
}

static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	const u32 *rss_indir = indir ? indir : fp->indir_table;
	const u8 *rss_key = key ? key : fp->rss_key;
	enum fun_eth_hash_alg algo;

	if (!fp->rss_cfg)
		return -EOPNOTSUPP;

	if (hfunc == ETH_RSS_HASH_NO_CHANGE)
		algo = fp->hash_algo;
	else if (hfunc == ETH_RSS_HASH_CRC32)
		algo = FUN_ETH_RSS_ALG_CRC32;
	else if (hfunc == ETH_RSS_HASH_TOP)
		algo = FUN_ETH_RSS_ALG_TOEPLITZ;
	else
		return -EINVAL;

	/* If the port is enabled try to reconfigure RSS and keep the new
	 * settings if successful. If it is down we update the RSS settings
	 * and apply them at the next UP time.
	 */
	if (netif_running(netdev)) {
		int rc = fun_config_rss(netdev, algo, rss_key, rss_indir,
					FUN_ADMIN_SUBOP_MODIFY);
		if (rc)
			return rc;
	}

	fp->hash_algo = algo;
	if (key)
		memcpy(fp->rss_key, key, sizeof(fp->rss_key));
	if (indir)
		memcpy(fp->indir_table, indir,
		       sizeof(u32) * fp->indir_table_nentries);
	return 0;
}

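/* Timestamping capabilities. As advertised here, Rx hardware timestamps are
 * delivered for all packets but the device exposes no PTP clock, hence
 * phc_index is -1 and no Tx hardware timestamping modes are offered.
 */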
static int fun_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = -1;
	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
	return 0;
}

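/* Convert a device FUN_PORT_FEC_* bitmap to ETHTOOL_FEC_* flags.
 * FUN_PORT_FEC_NA is reported as ETHTOOL_FEC_NONE.
 */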
static unsigned int to_ethtool_fec(unsigned int fun_fec)
{
	unsigned int fec = 0;

	if (fun_fec == FUN_PORT_FEC_NA)
		fec |= ETHTOOL_FEC_NONE;
	if (fun_fec & FUN_PORT_FEC_OFF)
		fec |= ETHTOOL_FEC_OFF;
	if (fun_fec & FUN_PORT_FEC_RS)
		fec |= ETHTOOL_FEC_RS;
	if (fun_fec & FUN_PORT_FEC_FC)
		fec |= ETHTOOL_FEC_BASER;
	if (fun_fec & FUN_PORT_FEC_AUTO)
		fec |= ETHTOOL_FEC_AUTO;
	return fec;
}

static int fun_get_fecparam(struct net_device *netdev,
			    struct ethtool_fecparam *fec)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 fec_data;
	int rc;

	rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data);
	if (rc)
		return rc;

	fec->active_fec = to_ethtool_fec(fec_data & 0xff);
	fec->fec = to_ethtool_fec(fec_data >> 8);
	return 0;
}

static int fun_set_fecparam(struct net_device *netdev,
			    struct ethtool_fecparam *fec)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 fec_mode;

	switch (fec->fec) {
	case ETHTOOL_FEC_AUTO:
		fec_mode = FUN_PORT_FEC_AUTO;
		break;
	case ETHTOOL_FEC_OFF:
		if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE))
			return -EINVAL;
		fec_mode = FUN_PORT_FEC_OFF;
		break;
	case ETHTOOL_FEC_BASER:
		if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC))
			return -EINVAL;
		fec_mode = FUN_PORT_FEC_FC;
		break;
	case ETHTOOL_FEC_RS:
		if (!(fp->port_caps & FUN_PORT_CAP_FEC_RS))
			return -EINVAL;
		fec_mode = FUN_PORT_FEC_RS;
		break;
	default:
		return -EINVAL;
	}

	return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
}

static const struct ethtool_ops fun_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link_ksettings  = fun_get_link_ksettings,
	.set_link_ksettings  = fun_set_link_ksettings,
	.set_phys_id         = fun_set_phys_id,
	.get_drvinfo         = fun_get_drvinfo,
	.get_msglevel        = fun_get_msglevel,
	.set_msglevel        = fun_set_msglevel,
	.get_regs_len        = fun_get_regs_len,
	.get_regs            = fun_get_regs,
	.get_link	     = ethtool_op_get_link,
	.get_coalesce        = fun_get_coalesce,
	.set_coalesce        = fun_set_coalesce,
	.get_ts_info         = fun_get_ts_info,
	.get_ringparam       = fun_get_ringparam,
	.set_ringparam       = fun_set_ringparam,
	.get_sset_count      = fun_get_sset_count,
	.get_strings         = fun_get_strings,
	.get_ethtool_stats   = fun_get_ethtool_stats,
	.get_rxnfc	     = fun_get_rxnfc,
	.set_rxnfc           = fun_set_rxnfc,
	.get_rxfh_indir_size = fun_get_rxfh_indir_size,
	.get_rxfh_key_size   = fun_get_rxfh_key_size,
	.get_rxfh            = fun_get_rxfh,
	.set_rxfh            = fun_set_rxfh,
	.get_channels        = fun_get_channels,
	.set_channels        = fun_set_channels,
	.get_fecparam	     = fun_get_fecparam,
	.set_fecparam	     = fun_set_fecparam,
	.get_pauseparam      = fun_get_pauseparam,
	.set_pauseparam      = fun_set_pauseparam,
	.nway_reset          = fun_restart_an,
	.get_pause_stats     = fun_get_pause_stats,
	.get_fec_stats       = fun_get_fec_stats,
	.get_eth_mac_stats   = fun_get_802_3_stats,
	.get_eth_ctrl_stats  = fun_get_802_3_ctrl_stats,
	.get_rmon_stats      = fun_get_rmon_stats,
};

void fun_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &fun_ethtool_ops;
}