xref: /openbmc/linux/drivers/net/ethernet/sfc/ethtool.c (revision 715f23b6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3  * Driver for Solarflare network controllers and boards
4  * Copyright 2005-2006 Fen Systems Ltd.
5  * Copyright 2006-2013 Solarflare Communications Inc.
6  */
7 
8 #include <linux/netdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/in.h>
12 #include "net_driver.h"
13 #include "workarounds.h"
14 #include "selftest.h"
15 #include "efx.h"
16 #include "filter.h"
17 #include "nic.h"
18 
/* Descriptor for one software-maintained ethtool statistic. */
struct efx_sw_stat_desc {
	const char *name;	/* name reported via ethtool -S */
	enum {
		EFX_ETHTOOL_STAT_SOURCE_nic,
		EFX_ETHTOOL_STAT_SOURCE_channel,
		EFX_ETHTOOL_STAT_SOURCE_tx_queue
	} source;		/* which structure holds the counter */
	unsigned offset;	/* byte offset of the counter in that structure */
	u64(*get_stat) (void *field); /* Reader function */
};
29 
/* Initialiser for a struct efx_sw_stat_desc with type-checking.
 * The ternary in .offset has identical arms on purpose: its only job is
 * the pointer comparison, which makes the compiler warn at build time if
 * @field in struct efx_<source_name> is not of type @field_type.
 */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
				get_stat_function) {			\
	.name = #stat_name,						\
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,		\
	.offset = ((((field_type *) 0) ==				\
		      &((struct efx_##source_name *)0)->field) ?	\
		    offsetof(struct efx_##source_name, field) :		\
		    offsetof(struct efx_##source_name, field)),		\
	.get_stat = get_stat_function,					\
}
41 
42 static u64 efx_get_uint_stat(void *field)
43 {
44 	return *(unsigned int *)field;
45 }
46 
47 static u64 efx_get_atomic_stat(void *field)
48 {
49 	return atomic_read((atomic_t *) field);
50 }
51 
/* NIC-level error counter of type atomic_t in struct efx_nic */
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
	EFX_ETHTOOL_STAT(field, nic, field,			\
			 atomic_t, efx_get_atomic_stat)

/* Per-channel unsigned int counter named n_<field> in struct efx_channel */
#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
			 unsigned int, efx_get_uint_stat)
/* As above, but the struct member is named exactly <field> (no n_ prefix) */
#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field)		\
	EFX_ETHTOOL_STAT(field, channel, field,			\
			 unsigned int, efx_get_uint_stat)

/* Per-TX-queue unsigned int counter, reported with a tx_ name prefix */
#define EFX_ETHTOOL_UINT_TXQ_STAT(field)			\
	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,		\
			 unsigned int, efx_get_uint_stat)
66 
/* Table of software statistics reported by ethtool -S.  Channel and
 * TX-queue counters are summed over all instances when read (see
 * efx_ethtool_get_stats()).
 */
static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
#ifdef CONFIG_RFS_ACCEL
	EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
#endif
};
99 
/* Number of software statistics in the table above */
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)

/* Magic cookie for ethtool EEPROM accesses; checked by the EEPROM ops
 * (implemented outside this chunk)
 */
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
103 
104 /**************************************************************************
105  *
106  * Ethtool operations
107  *
108  **************************************************************************
109  */
110 
111 /* Identify device by flashing LEDs */
112 static int efx_ethtool_phys_id(struct net_device *net_dev,
113 			       enum ethtool_phys_id_state state)
114 {
115 	struct efx_nic *efx = netdev_priv(net_dev);
116 	enum efx_led_mode mode = EFX_LED_DEFAULT;
117 
118 	switch (state) {
119 	case ETHTOOL_ID_ON:
120 		mode = EFX_LED_ON;
121 		break;
122 	case ETHTOOL_ID_OFF:
123 		mode = EFX_LED_OFF;
124 		break;
125 	case ETHTOOL_ID_INACTIVE:
126 		mode = EFX_LED_DEFAULT;
127 		break;
128 	case ETHTOOL_ID_ACTIVE:
129 		return 1;	/* cycle on/off once per second */
130 	}
131 
132 	efx->type->set_id_led(efx, mode);
133 	return 0;
134 }
135 
/* This must be called with rtnl_lock held. */
static int
efx_ethtool_get_link_ksettings(struct net_device *net_dev,
			       struct ethtool_link_ksettings *cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_link_state *link_state = &efx->link_state;
	u32 supported;

	/* mac_lock serialises against concurrent PHY reconfiguration */
	mutex_lock(&efx->mac_lock);
	efx->phy_op->get_link_ksettings(efx, cmd);
	mutex_unlock(&efx->mac_lock);

	/* Both MACs support pause frames (bidirectional and respond-only) */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	/* In internal loopback, report the driver's link state rather
	 * than whatever the PHY filled in above.
	 */
	if (LOOPBACK_INTERNAL(efx)) {
		cmd->base.speed = link_state->speed;
		cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}
165 
/* This must be called with rtnl_lock held. */
static int
efx_ethtool_set_link_ksettings(struct net_device *net_dev,
			       const struct ethtool_link_ksettings *cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	/* GMAC does not support 1000Mbps HD */
	if ((cmd->base.speed == SPEED_1000) &&
	    (cmd->base.duplex != DUPLEX_FULL)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "rejecting unsupported 1000Mbps HD setting\n");
		return -EINVAL;
	}

	/* mac_lock serialises against concurrent PHY reconfiguration */
	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->set_link_ksettings(efx, cmd);
	mutex_unlock(&efx->mac_lock);
	return rc;
}
187 
/* Fill in driver name/version, MC firmware version and PCI bus info
 * for the ETHTOOL_GDRVINFO request.
 */
static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
				    struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
	efx_mcdi_print_fwver(efx, info->fw_version,
			     sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
199 
/* Report the size of the register dump produced by efx_ethtool_get_regs() */
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return efx_nic_get_regs_len(efx);
}
204 
/* Dump NIC registers; buffer length must match efx_ethtool_get_regs_len() */
static void efx_ethtool_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Report the hardware revision so userland can decode the dump */
	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}
213 
214 static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
215 {
216 	struct efx_nic *efx = netdev_priv(net_dev);
217 	return efx->msg_enable;
218 }
219 
220 static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
221 {
222 	struct efx_nic *efx = netdev_priv(net_dev);
223 	efx->msg_enable = msg_enable;
224 }
225 
226 /**
227  * efx_fill_test - fill in an individual self-test entry
228  * @test_index:		Index of the test
229  * @strings:		Ethtool strings, or %NULL
230  * @data:		Ethtool test results, or %NULL
231  * @test:		Pointer to test result (used only if data != %NULL)
232  * @unit_format:	Unit name format (e.g. "chan\%d")
233  * @unit_id:		Unit id (e.g. 0 for "chan0")
234  * @test_format:	Test name format (e.g. "loopback.\%s.tx.sent")
235  * @test_id:		Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
236  *
237  * Fill in an individual self-test entry.
238  */
239 static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
240 			  int *test, const char *unit_format, int unit_id,
241 			  const char *test_format, const char *test_id)
242 {
243 	char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
244 
245 	/* Fill data value, if applicable */
246 	if (data)
247 		data[test_index] = *test;
248 
249 	/* Fill string, if applicable */
250 	if (strings) {
251 		if (strchr(unit_format, '%'))
252 			snprintf(unit_str, sizeof(unit_str),
253 				 unit_format, unit_id);
254 		else
255 			strcpy(unit_str, unit_format);
256 		snprintf(test_str, sizeof(test_str), test_format, test_id);
257 		snprintf(strings + test_index * ETH_GSTRING_LEN,
258 			 ETH_GSTRING_LEN,
259 			 "%-6s %-24s", unit_str, test_str);
260 	}
261 }
262 
/* Helpers expanding to a printf-style format string plus its argument,
 * for use as the unit/test name parameter pairs of efx_fill_test().
 */
#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EFX_LOOPBACK_NAME(_mode, _counter)			\
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
268 
/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx:		Efx NIC
 * @lb_tests:		Efx loopback self-test results structure
 * @mode:		Loopback test mode
 * @test_index:		Starting index of the test
 * @strings:		Ethtool strings, or %NULL
 * @data:		Ethtool test results, or %NULL
 *
 * Fill in a block of loopback self-test entries.  Return new test
 * index.
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
				  struct efx_loopback_self_tests *lb_tests,
				  enum efx_loopback_mode mode,
				  unsigned int test_index,
				  u8 *strings, u64 *data)
{
	/* Loopback tests run only on the first TX-capable channel */
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;

	/* Per-TX-queue sent/done counters ... */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_done"));
	}
	/* ... followed by aggregate RX good/bad counters */
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_good"));
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}
312 
/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx:		Efx NIC
 * @tests:		Efx self-test results structure, or %NULL
 * @strings:		Ethtool strings, or %NULL
 * @data:		Ethtool test results, or %NULL
 *
 * Get self-test number of strings, strings, and/or test results.
 * Return number of strings (== number of test results).
 *
 * The reason for merging these three functions is to make sure that
 * they can never be inconsistent.
 */
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				       struct efx_self_tests *tests,
				       u8 *strings, u64 *data)
{
	struct efx_channel *channel;
	unsigned int n = 0, i;
	enum efx_loopback_mode mode;

	efx_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	efx_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	efx_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	efx_for_each_channel(channel, efx) {
		efx_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	efx_fill_test(n++, strings, data, &tests->memory,
		      "core", 0, "memory", NULL);
	efx_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	/* PHY-specific tests, enumerated by name until the PHY op
	 * returns NULL
	 */
	if (efx->phy_op->run_tests != NULL) {
		EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			efx_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = efx_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}
385 
/* Count, and if @strings != NULL also name, the per-queue statistics.
 * The order here (TX queues, RX queues, XDP TX queues) must match the
 * order in which efx_ethtool_get_stats() emits the values.
 */
static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
{
	size_t n_stats = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			n_stats++;
			if (strings != NULL) {
				/* Divide by EFX_TXQ_TYPES to turn the
				 * queue index into a channel-relative
				 * number
				 */
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-%u.tx_packets",
					 channel->tx_queue[0].queue /
					 EFX_TXQ_TYPES);

				strings += ETH_GSTRING_LEN;
			}
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "rx-%d.rx_packets", channel->channel);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	/* One entry per XDP TX queue, if any are configured */
	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
		unsigned short xdp;

		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
			n_stats++;
			if (strings) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-xdp-cpu-%hu.tx_packets", xdp);
				strings += ETH_GSTRING_LEN;
			}
		}
	}

	return n_stats;
}
429 
430 static int efx_ethtool_get_sset_count(struct net_device *net_dev,
431 				      int string_set)
432 {
433 	struct efx_nic *efx = netdev_priv(net_dev);
434 
435 	switch (string_set) {
436 	case ETH_SS_STATS:
437 		return efx->type->describe_stats(efx, NULL) +
438 		       EFX_ETHTOOL_SW_STAT_COUNT +
439 		       efx_describe_per_queue_stats(efx, NULL) +
440 		       efx_ptp_describe_stats(efx, NULL);
441 	case ETH_SS_TEST:
442 		return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
443 	default:
444 		return -EINVAL;
445 	}
446 }
447 
/* Fill in the strings for a given ethtool string set.  The emission
 * order must match efx_ethtool_get_sset_count() and
 * efx_ethtool_get_stats().
 */
static void efx_ethtool_get_strings(struct net_device *net_dev,
				    u32 string_set, u8 *strings)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		/* Hardware stats, then software stats, then per-queue,
		 * then PTP; advance past each group as it is written.
		 */
		strings += (efx->type->describe_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
			strlcpy(strings + i * ETH_GSTRING_LEN,
				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
		strings += (efx_describe_per_queue_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		efx_ptp_describe_stats(efx, strings);
		break;
	case ETH_SS_TEST:
		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
		break;
	default:
		/* No other string sets */
		break;
	}
}
474 
/* Fill in all statistics values for ethtool -S, in the same order as
 * the strings emitted by efx_ethtool_get_strings():  hardware stats,
 * software stats, per-queue packet counts, then PTP stats.
 */
static void efx_ethtool_get_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	const struct efx_sw_stat_desc *stat;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* stats_lock protects the hardware stats buffer and the software
	 * counters from concurrent updates
	 */
	spin_lock_bh(&efx->stats_lock);

	/* Get NIC statistics */
	data += efx->type->update_stats(efx, data, NULL);

	/* Get software statistics */
	for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
		stat = &efx_sw_stat_desc[i];
		switch (stat->source) {
		case EFX_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_channel:
			/* Sum the counter over all channels */
			data[i] = 0;
			efx_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
			/* Sum the counter over all TX queues */
			data[i] = 0;
			efx_for_each_channel(channel, efx) {
				efx_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
	data += EFX_ETHTOOL_SW_STAT_COUNT;

	spin_unlock_bh(&efx->stats_lock);

	/* Per-channel TX packet counts, summed over the channel's queues */
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			*data = 0;
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				*data += tx_queue->tx_packets;
			}
			data++;
		}
	}
	/* Per-channel RX packet counts */
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			*data = 0;
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				*data += rx_queue->rx_packets;
			}
			data++;
		}
	}
	/* Per-XDP-TX-queue packet counts */
	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
		int xdp;

		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
			data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
			data++;
		}
	}

	efx_ptp_update_stats(efx, data);
}
548 
/* Run the driver self-tests (ETHTOOL_TEST), bringing the interface up
 * temporarily if necessary, and report results through @data.
 */
static void efx_ethtool_self_test(struct net_device *net_dev,
				  struct ethtool_test *test, u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_self_tests *efx_tests;
	bool already_up;
	int rc = -ENOMEM;

	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
	if (!efx_tests)
		goto fail;

	if (efx->state != STATE_READY) {
		rc = -EBUSY;
		goto out;
	}

	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

	/* We need rx buffers and interrupts. */
	already_up = (efx->net_dev->flags & IFF_UP);
	if (!already_up) {
		rc = dev_open(efx->net_dev, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed opening device.\n");
			goto out;
		}
	}

	rc = efx_selftest(efx, efx_tests, test->flags);

	/* Restore the interface to its previous state */
	if (!already_up)
		dev_close(efx->net_dev);

	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
		   rc == 0 ? "passed" : "failed",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

out:
	/* Report whatever results we have (zero-filled on early exit) */
	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
	kfree(efx_tests);
fail:
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}
596 
/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Delegate to the generic clause-45 MDIO helper */
	return mdio45_nway_restart(&efx->mdio);
}
604 
605 /*
606  * Each channel has a single IRQ and moderation timer, started by any
607  * completion (or other event).  Unless the module parameter
608  * separate_tx_channels is set, IRQs and moderation are therefore
609  * shared between RX and TX completions.  In this case, when RX IRQ
610  * moderation is explicitly changed then TX IRQ moderation is
611  * automatically changed too, but otherwise we fail if the two values
612  * are requested to be different.
613  *
614  * The hardware does not support a limit on the number of completions
615  * before an IRQ, so we do not use the max_frames fields.  We should
616  * report and require that max_frames == (usecs != 0), but this would
617  * invalidate existing user documentation.
618  *
619  * The hardware does not have distinct settings for interrupt
620  * moderation while the previous IRQ is being handled, so we should
621  * not use the 'irq' fields.  However, an earlier developer
622  * misunderstood the meaning of the 'irq' fields and the driver did
623  * not support the standard fields.  To avoid invalidating existing
624  * user documentation, we report and accept changes through either the
625  * standard or 'irq' fields.  If both are changed at the same time, we
626  * prefer the standard field.
627  *
628  * We implement adaptive IRQ moderation, but use a different algorithm
629  * from that assumed in the definition of struct ethtool_coalesce.
630  * Therefore we do not use any of the adaptive moderation parameters
631  * in it.
632  */
633 
634 static int efx_ethtool_get_coalesce(struct net_device *net_dev,
635 				    struct ethtool_coalesce *coalesce)
636 {
637 	struct efx_nic *efx = netdev_priv(net_dev);
638 	unsigned int tx_usecs, rx_usecs;
639 	bool rx_adaptive;
640 
641 	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
642 
643 	coalesce->tx_coalesce_usecs = tx_usecs;
644 	coalesce->tx_coalesce_usecs_irq = tx_usecs;
645 	coalesce->rx_coalesce_usecs = rx_usecs;
646 	coalesce->rx_coalesce_usecs_irq = rx_usecs;
647 	coalesce->use_adaptive_rx_coalesce = rx_adaptive;
648 
649 	return 0;
650 }
651 
/* Change IRQ moderation settings.  Per the block comment above, the
 * standard usecs fields take precedence over the 'irq' fields when both
 * are changed at once.
 */
static int efx_ethtool_set_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	unsigned int tx_usecs, rx_usecs;
	bool adaptive, rx_may_override_tx;
	int rc;

	/* Adaptive TX moderation is not implemented */
	if (coalesce->use_adaptive_tx_coalesce)
		return -EINVAL;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);

	/* Prefer the standard field; fall back to the 'irq' field */
	if (coalesce->rx_coalesce_usecs != rx_usecs)
		rx_usecs = coalesce->rx_coalesce_usecs;
	else
		rx_usecs = coalesce->rx_coalesce_usecs_irq;

	adaptive = coalesce->use_adaptive_rx_coalesce;

	/* If channels are shared, TX IRQ moderation can be quietly
	 * overridden unless it is changed from its old value.
	 */
	rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
			      coalesce->tx_coalesce_usecs_irq == tx_usecs);
	if (coalesce->tx_coalesce_usecs != tx_usecs)
		tx_usecs = coalesce->tx_coalesce_usecs;
	else
		tx_usecs = coalesce->tx_coalesce_usecs_irq;

	rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
				     rx_may_override_tx);
	if (rc != 0)
		return rc;

	/* Push the new settings to the hardware for every channel */
	efx_for_each_channel(channel, efx)
		efx->type->push_irq_moderation(channel);

	return 0;
}
693 
694 static void efx_ethtool_get_ringparam(struct net_device *net_dev,
695 				      struct ethtool_ringparam *ring)
696 {
697 	struct efx_nic *efx = netdev_priv(net_dev);
698 
699 	ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
700 	ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
701 	ring->rx_pending = efx->rxq_entries;
702 	ring->tx_pending = efx->txq_entries;
703 }
704 
/* Validate and apply new RX/TX ring sizes; reallocates the channels */
static int efx_ethtool_set_ringparam(struct net_device *net_dev,
				     struct ethtool_ringparam *ring)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u32 txq_entries;

	/* Mini/jumbo rings are not supported; enforce upper bounds */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
	    ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
		return -EINVAL;

	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
		netif_err(efx, drv, efx->net_dev,
			  "RX queues cannot be smaller than %u\n",
			  EFX_RXQ_MIN_ENT);
		return -EINVAL;
	}

	/* Undersized TX rings are silently rounded up (with a warning)
	 * rather than rejected
	 */
	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
	if (txq_entries != ring->tx_pending)
		netif_warn(efx, drv, efx->net_dev,
			   "increasing TX queue size to minimum of %u\n",
			   txq_entries);

	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
}
731 
/* Change flow-control (pause frame) configuration */
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
				      struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
		     (pause->tx_pause ? EFX_FC_TX : 0) |
		     (pause->autoneg ? EFX_FC_AUTO : 0));

	/* TX pause without RX pause is not a supported combination */
	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	/* Autoneg flow control requires link autonegotiation */
	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* Hook for Falcon bug 11482 workaround */
	if (efx->type->prepare_enable_fc_tx &&
	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
		efx->type->prepare_enable_fc_tx(efx);

	/* Reconfigure the PHY only if the advertised capabilities or the
	 * autoneg flag actually changed
	 */
	old_adv = efx->link_advertising[0];
	old_fc = efx->wanted_fc;
	efx_link_set_wanted_fc(efx, wanted_fc);
	if (efx->link_advertising[0] != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
		rc = efx->phy_op->reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC. The PHY *may* generate a link state change event
	 * if the user just changed the advertised capabilities, but there's no
	 * harm doing this twice */
	efx_mac_reconfigure(efx);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}
789 
790 static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
791 				       struct ethtool_pauseparam *pause)
792 {
793 	struct efx_nic *efx = netdev_priv(net_dev);
794 
795 	pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
796 	pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
797 	pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
798 }
799 
800 static void efx_ethtool_get_wol(struct net_device *net_dev,
801 				struct ethtool_wolinfo *wol)
802 {
803 	struct efx_nic *efx = netdev_priv(net_dev);
804 	return efx->type->get_wol(efx, wol);
805 }
806 
807 
808 static int efx_ethtool_set_wol(struct net_device *net_dev,
809 			       struct ethtool_wolinfo *wol)
810 {
811 	struct efx_nic *efx = netdev_priv(net_dev);
812 	return efx->type->set_wol(efx, wol->wolopts);
813 }
814 
815 static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
816 {
817 	struct efx_nic *efx = netdev_priv(net_dev);
818 	int rc;
819 
820 	rc = efx->type->map_reset_flags(flags);
821 	if (rc < 0)
822 		return rc;
823 
824 	return efx_reset(efx, rc);
825 }
826 
/* MAC address mask including only I/G bit */
static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};

/* All-ones masks for the fields of an ethtool RX flow specification */
#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define IP_PROTO_FULL_MASK	0xFF
#define PORT_FULL_MASK		((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)
834 
835 static inline void ip6_fill_mask(__be32 *mask)
836 {
837 	mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
838 }
839 
840 static int efx_ethtool_get_class_rule(struct efx_nic *efx,
841 				      struct ethtool_rx_flow_spec *rule,
842 				      u32 *rss_context)
843 {
844 	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
845 	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
846 	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
847 	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
848 	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
849 	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
850 	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
851 	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
852 	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
853 	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
854 	struct efx_filter_spec spec;
855 	int rc;
856 
857 	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
858 					rule->location, &spec);
859 	if (rc)
860 		return rc;
861 
862 	if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
863 		rule->ring_cookie = RX_CLS_FLOW_DISC;
864 	else
865 		rule->ring_cookie = spec.dmaq_id;
866 
867 	if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
868 	    spec.ether_type == htons(ETH_P_IP) &&
869 	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
870 	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
871 	    !(spec.match_flags &
872 	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
873 		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
874 		EFX_FILTER_MATCH_IP_PROTO |
875 		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
876 		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
877 				   TCP_V4_FLOW : UDP_V4_FLOW);
878 		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
879 			ip_entry->ip4dst = spec.loc_host[0];
880 			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
881 		}
882 		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
883 			ip_entry->ip4src = spec.rem_host[0];
884 			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
885 		}
886 		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
887 			ip_entry->pdst = spec.loc_port;
888 			ip_mask->pdst = PORT_FULL_MASK;
889 		}
890 		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
891 			ip_entry->psrc = spec.rem_port;
892 			ip_mask->psrc = PORT_FULL_MASK;
893 		}
894 	} else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
895 	    spec.ether_type == htons(ETH_P_IPV6) &&
896 	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
897 	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
898 	    !(spec.match_flags &
899 	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
900 		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
901 		EFX_FILTER_MATCH_IP_PROTO |
902 		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
903 		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
904 				   TCP_V6_FLOW : UDP_V6_FLOW);
905 		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
906 			memcpy(ip6_entry->ip6dst, spec.loc_host,
907 			       sizeof(ip6_entry->ip6dst));
908 			ip6_fill_mask(ip6_mask->ip6dst);
909 		}
910 		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
911 			memcpy(ip6_entry->ip6src, spec.rem_host,
912 			       sizeof(ip6_entry->ip6src));
913 			ip6_fill_mask(ip6_mask->ip6src);
914 		}
915 		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
916 			ip6_entry->pdst = spec.loc_port;
917 			ip6_mask->pdst = PORT_FULL_MASK;
918 		}
919 		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
920 			ip6_entry->psrc = spec.rem_port;
921 			ip6_mask->psrc = PORT_FULL_MASK;
922 		}
923 	} else if (!(spec.match_flags &
924 		     ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
925 		       EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
926 		       EFX_FILTER_MATCH_OUTER_VID))) {
927 		rule->flow_type = ETHER_FLOW;
928 		if (spec.match_flags &
929 		    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
930 			ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
931 			if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
932 				eth_broadcast_addr(mac_mask->h_dest);
933 			else
934 				ether_addr_copy(mac_mask->h_dest,
935 						mac_addr_ig_mask);
936 		}
937 		if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
938 			ether_addr_copy(mac_entry->h_source, spec.rem_mac);
939 			eth_broadcast_addr(mac_mask->h_source);
940 		}
941 		if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
942 			mac_entry->h_proto = spec.ether_type;
943 			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
944 		}
945 	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
946 		   spec.ether_type == htons(ETH_P_IP) &&
947 		   !(spec.match_flags &
948 		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
949 		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
950 		       EFX_FILTER_MATCH_IP_PROTO))) {
951 		rule->flow_type = IPV4_USER_FLOW;
952 		uip_entry->ip_ver = ETH_RX_NFC_IP4;
953 		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
954 			uip_mask->proto = IP_PROTO_FULL_MASK;
955 			uip_entry->proto = spec.ip_proto;
956 		}
957 		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
958 			uip_entry->ip4dst = spec.loc_host[0];
959 			uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
960 		}
961 		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
962 			uip_entry->ip4src = spec.rem_host[0];
963 			uip_mask->ip4src = IP4_ADDR_FULL_MASK;
964 		}
965 	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
966 		   spec.ether_type == htons(ETH_P_IPV6) &&
967 		   !(spec.match_flags &
968 		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
969 		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
970 		       EFX_FILTER_MATCH_IP_PROTO))) {
971 		rule->flow_type = IPV6_USER_FLOW;
972 		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
973 			uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
974 			uip6_entry->l4_proto = spec.ip_proto;
975 		}
976 		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
977 			memcpy(uip6_entry->ip6dst, spec.loc_host,
978 			       sizeof(uip6_entry->ip6dst));
979 			ip6_fill_mask(uip6_mask->ip6dst);
980 		}
981 		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
982 			memcpy(uip6_entry->ip6src, spec.rem_host,
983 			       sizeof(uip6_entry->ip6src));
984 			ip6_fill_mask(uip6_mask->ip6src);
985 		}
986 	} else {
987 		/* The above should handle all filters that we insert */
988 		WARN_ON(1);
989 		return -EINVAL;
990 	}
991 
992 	if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
993 		rule->flow_type |= FLOW_EXT;
994 		rule->h_ext.vlan_tci = spec.outer_vid;
995 		rule->m_ext.vlan_tci = htons(0xfff);
996 	}
997 
998 	if (spec.flags & EFX_FILTER_FLAG_RX_RSS) {
999 		rule->flow_type |= FLOW_RSS;
1000 		*rss_context = spec.rss_context;
1001 	}
1002 
1003 	return rc;
1004 }
1005 
1006 static int
1007 efx_ethtool_get_rxnfc(struct net_device *net_dev,
1008 		      struct ethtool_rxnfc *info, u32 *rule_locs)
1009 {
1010 	struct efx_nic *efx = netdev_priv(net_dev);
1011 	u32 rss_context = 0;
1012 	s32 rc = 0;
1013 
1014 	switch (info->cmd) {
1015 	case ETHTOOL_GRXRINGS:
1016 		info->data = efx->n_rx_channels;
1017 		return 0;
1018 
1019 	case ETHTOOL_GRXFH: {
1020 		struct efx_rss_context *ctx = &efx->rss_context;
1021 
1022 		mutex_lock(&efx->rss_lock);
1023 		if (info->flow_type & FLOW_RSS && info->rss_context) {
1024 			ctx = efx_find_rss_context_entry(efx, info->rss_context);
1025 			if (!ctx) {
1026 				rc = -ENOENT;
1027 				goto out_unlock;
1028 			}
1029 		}
1030 		info->data = 0;
1031 		if (!efx_rss_active(ctx)) /* No RSS */
1032 			goto out_unlock;
1033 		switch (info->flow_type & ~FLOW_RSS) {
1034 		case UDP_V4_FLOW:
1035 			if (ctx->rx_hash_udp_4tuple)
1036 				/* fall through */
1037 		case TCP_V4_FLOW:
1038 				info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1039 			/* fall through */
1040 		case SCTP_V4_FLOW:
1041 		case AH_ESP_V4_FLOW:
1042 		case IPV4_FLOW:
1043 			info->data |= RXH_IP_SRC | RXH_IP_DST;
1044 			break;
1045 		case UDP_V6_FLOW:
1046 			if (ctx->rx_hash_udp_4tuple)
1047 				/* fall through */
1048 		case TCP_V6_FLOW:
1049 				info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1050 			/* fall through */
1051 		case SCTP_V6_FLOW:
1052 		case AH_ESP_V6_FLOW:
1053 		case IPV6_FLOW:
1054 			info->data |= RXH_IP_SRC | RXH_IP_DST;
1055 			break;
1056 		default:
1057 			break;
1058 		}
1059 out_unlock:
1060 		mutex_unlock(&efx->rss_lock);
1061 		return rc;
1062 	}
1063 
1064 	case ETHTOOL_GRXCLSRLCNT:
1065 		info->data = efx_filter_get_rx_id_limit(efx);
1066 		if (info->data == 0)
1067 			return -EOPNOTSUPP;
1068 		info->data |= RX_CLS_LOC_SPECIAL;
1069 		info->rule_cnt =
1070 			efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
1071 		return 0;
1072 
1073 	case ETHTOOL_GRXCLSRULE:
1074 		if (efx_filter_get_rx_id_limit(efx) == 0)
1075 			return -EOPNOTSUPP;
1076 		rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context);
1077 		if (rc < 0)
1078 			return rc;
1079 		if (info->fs.flow_type & FLOW_RSS)
1080 			info->rss_context = rss_context;
1081 		return 0;
1082 
1083 	case ETHTOOL_GRXCLSRLALL:
1084 		info->data = efx_filter_get_rx_id_limit(efx);
1085 		if (info->data == 0)
1086 			return -EOPNOTSUPP;
1087 		rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
1088 					   rule_locs, info->rule_cnt);
1089 		if (rc < 0)
1090 			return rc;
1091 		info->rule_cnt = rc;
1092 		return 0;
1093 
1094 	default:
1095 		return -EOPNOTSUPP;
1096 	}
1097 }
1098 
1099 static inline bool ip6_mask_is_full(__be32 mask[4])
1100 {
1101 	return !~(mask[0] & mask[1] & mask[2] & mask[3]);
1102 }
1103 
1104 static inline bool ip6_mask_is_empty(__be32 mask[4])
1105 {
1106 	return !(mask[0] | mask[1] | mask[2] | mask[3]);
1107 }
1108 
/* Translate an ethtool RX flow classification rule into an
 * efx_filter_spec and insert it as a manual-priority filter, letting
 * the driver choose the filter table location.  On success the
 * assigned filter ID is reported back through rule->location and 0 is
 * returned; -EINVAL is returned for anything the filter model cannot
 * express (in particular, only all-or-nothing field masks are
 * accepted).  @rss_context is used only when the rule carries FLOW_RSS.
 */
static int efx_ethtool_set_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule,
				      u32 rss_context)
{
	/* Convenience aliases into the rule's match-value (h_u) and
	 * match-mask (m_u) unions; only the pair selected by the
	 * flow_type below is meaningful.
	 */
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS);
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	enum efx_filter_flags flags = 0;
	struct efx_filter_spec spec;
	int rc;

	/* Check that user wants us to choose the location */
	if (rule->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	/* Range-check ring_cookie */
	if (rule->ring_cookie >= efx->n_rx_channels &&
	    rule->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	/* Check for unsupported extensions */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
	     rule->m_ext.data[1]))
		return -EINVAL;

	if (efx->rx_scatter)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	if (rule->flow_type & FLOW_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;

	/* A RX_CLS_FLOW_DISC cookie means "drop"; otherwise the cookie
	 * is the RX queue to deliver to.
	 */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags,
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);

	if (rule->flow_type & FLOW_RSS)
		spec.rss_context = rss_context;

	/* Build spec.match_flags and match values from the rule.  For
	 * every field the hardware matches exactly, so a non-zero mask
	 * must be the full mask or the rule is rejected.
	 */
	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IP);
		spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP
							 : IPPROTO_UDP;
		if (ip_mask->ip4dst) {
			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = ip_entry->ip4dst;
		}
		if (ip_mask->ip4src) {
			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = ip_entry->ip4src;
		}
		if (ip_mask->pdst) {
			if (ip_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip_entry->pdst;
		}
		if (ip_mask->psrc) {
			if (ip_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip_entry->psrc;
		}
		/* TOS matching is not supported */
		if (ip_mask->tos)
			return -EINVAL;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IPV6);
		spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP
							 : IPPROTO_UDP;
		if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(ip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
			if (!ip6_mask_is_full(ip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (ip6_mask->pdst) {
			if (ip6_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip6_entry->pdst;
		}
		if (ip6_mask->psrc) {
			if (ip6_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip6_entry->psrc;
		}
		/* Traffic-class matching is not supported */
		if (ip6_mask->tclass)
			return -EINVAL;
		break;

	case IPV4_USER_FLOW:
		if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
		    uip_entry->ip_ver != ETH_RX_NFC_IP4)
			return -EINVAL;
		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IP);
		if (uip_mask->ip4dst) {
			if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = uip_entry->ip4dst;
		}
		if (uip_mask->ip4src) {
			if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = uip_entry->ip4src;
		}
		if (uip_mask->proto) {
			if (uip_mask->proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip_entry->proto;
		}
		break;

	case IPV6_USER_FLOW:
		if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
			return -EINVAL;
		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IPV6);
		if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(uip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
			if (!ip6_mask_is_full(uip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (uip6_mask->l4_proto) {
			if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip6_entry->l4_proto;
		}
		break;

	case ETHER_FLOW:
		if (!is_zero_ether_addr(mac_mask->h_dest)) {
			/* Destination MAC: either an exact match or the
			 * individual/group (multicast) bit only.
			 */
			if (ether_addr_equal(mac_mask->h_dest,
					     mac_addr_ig_mask))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
			else if (is_broadcast_ether_addr(mac_mask->h_dest))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
			else
				return -EINVAL;
			ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
		}
		if (!is_zero_ether_addr(mac_mask->h_source)) {
			if (!is_broadcast_ether_addr(mac_mask->h_source))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
			ether_addr_copy(spec.rem_mac, mac_entry->h_source);
		}
		if (mac_mask->h_proto) {
			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
			spec.ether_type = mac_entry->h_proto;
		}
		break;

	default:
		return -EINVAL;
	}

	/* Optional outer-VLAN match; only a full 12-bit VID mask works */
	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
		if (rule->m_ext.vlan_tci != htons(0xfff))
			return -EINVAL;
		spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		spec.outer_vid = rule->h_ext.vlan_tci;
	}

	/* Insert (replacing any equal-match filter) and report the ID */
	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	rule->location = rc;
	return 0;
}
1320 
1321 static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
1322 				 struct ethtool_rxnfc *info)
1323 {
1324 	struct efx_nic *efx = netdev_priv(net_dev);
1325 
1326 	if (efx_filter_get_rx_id_limit(efx) == 0)
1327 		return -EOPNOTSUPP;
1328 
1329 	switch (info->cmd) {
1330 	case ETHTOOL_SRXCLSRLINS:
1331 		return efx_ethtool_set_class_rule(efx, &info->fs,
1332 						  info->rss_context);
1333 
1334 	case ETHTOOL_SRXCLSRLDEL:
1335 		return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
1336 						 info->fs.location);
1337 
1338 	default:
1339 		return -EOPNOTSUPP;
1340 	}
1341 }
1342 
1343 static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
1344 {
1345 	struct efx_nic *efx = netdev_priv(net_dev);
1346 
1347 	if (efx->n_rx_channels == 1)
1348 		return 0;
1349 	return ARRAY_SIZE(efx->rss_context.rx_indir_table);
1350 }
1351 
1352 static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
1353 {
1354 	struct efx_nic *efx = netdev_priv(net_dev);
1355 
1356 	return efx->type->rx_hash_key_size;
1357 }
1358 
1359 static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
1360 				u8 *hfunc)
1361 {
1362 	struct efx_nic *efx = netdev_priv(net_dev);
1363 	int rc;
1364 
1365 	rc = efx->type->rx_pull_rss_config(efx);
1366 	if (rc)
1367 		return rc;
1368 
1369 	if (hfunc)
1370 		*hfunc = ETH_RSS_HASH_TOP;
1371 	if (indir)
1372 		memcpy(indir, efx->rss_context.rx_indir_table,
1373 		       sizeof(efx->rss_context.rx_indir_table));
1374 	if (key)
1375 		memcpy(key, efx->rss_context.rx_hash_key,
1376 		       efx->type->rx_hash_key_size);
1377 	return 0;
1378 }
1379 
1380 static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
1381 				const u8 *key, const u8 hfunc)
1382 {
1383 	struct efx_nic *efx = netdev_priv(net_dev);
1384 
1385 	/* Hash function is Toeplitz, cannot be changed */
1386 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1387 		return -EOPNOTSUPP;
1388 	if (!indir && !key)
1389 		return 0;
1390 
1391 	if (!key)
1392 		key = efx->rss_context.rx_hash_key;
1393 	if (!indir)
1394 		indir = efx->rss_context.rx_indir_table;
1395 
1396 	return efx->type->rx_push_rss_config(efx, true, indir, key);
1397 }
1398 
1399 static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
1400 					u8 *key, u8 *hfunc, u32 rss_context)
1401 {
1402 	struct efx_nic *efx = netdev_priv(net_dev);
1403 	struct efx_rss_context *ctx;
1404 	int rc = 0;
1405 
1406 	if (!efx->type->rx_pull_rss_context_config)
1407 		return -EOPNOTSUPP;
1408 
1409 	mutex_lock(&efx->rss_lock);
1410 	ctx = efx_find_rss_context_entry(efx, rss_context);
1411 	if (!ctx) {
1412 		rc = -ENOENT;
1413 		goto out_unlock;
1414 	}
1415 	rc = efx->type->rx_pull_rss_context_config(efx, ctx);
1416 	if (rc)
1417 		goto out_unlock;
1418 
1419 	if (hfunc)
1420 		*hfunc = ETH_RSS_HASH_TOP;
1421 	if (indir)
1422 		memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
1423 	if (key)
1424 		memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
1425 out_unlock:
1426 	mutex_unlock(&efx->rss_lock);
1427 	return rc;
1428 }
1429 
/* Create, modify or delete a custom RSS context.
 *
 * If *rss_context == ETH_RXFH_CONTEXT_ALLOC a new context is allocated
 * and its user-visible ID is returned through *rss_context; otherwise
 * the existing context with that ID is looked up.  @delete requests
 * removal of the context.  A NULL @indir or @key keeps that part of
 * the context's current configuration.
 */
static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
					const u32 *indir, const u8 *key,
					const u8 hfunc, u32 *rss_context,
					bool delete)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_rss_context *ctx;
	bool allocated = false;
	int rc;

	if (!efx->type->rx_push_rss_context_config)
		return -EOPNOTSUPP;
	/* Hash function is Toeplitz, cannot be changed */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	mutex_lock(&efx->rss_lock);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		if (delete) {
			/* alloc + delete == Nothing to do */
			rc = -EINVAL;
			goto out_unlock;
		}
		ctx = efx_alloc_rss_context_entry(efx);
		if (!ctx) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		/* No hardware context yet; rx_push_rss_context_config
		 * below is what actually creates it.
		 */
		ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
		/* Initialise indir table and key to defaults */
		efx_set_default_rx_indir_table(efx, ctx);
		netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
		allocated = true;
	} else {
		ctx = efx_find_rss_context_entry(efx, *rss_context);
		if (!ctx) {
			rc = -ENOENT;
			goto out_unlock;
		}
	}

	if (delete) {
		/* delete this context */
		rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
		if (!rc)
			efx_free_rss_context_entry(ctx);
		goto out_unlock;
	}

	/* Keep the context's current values for parts not supplied */
	if (!key)
		key = ctx->rx_hash_key;
	if (!indir)
		indir = ctx->rx_indir_table;

	rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
	if (rc && allocated)
		/* Creation failed: don't leak the software entry */
		efx_free_rss_context_entry(ctx);
	else
		*rss_context = ctx->user_id;
out_unlock:
	mutex_unlock(&efx->rss_lock);
	return rc;
}
1494 
1495 static int efx_ethtool_get_ts_info(struct net_device *net_dev,
1496 				   struct ethtool_ts_info *ts_info)
1497 {
1498 	struct efx_nic *efx = netdev_priv(net_dev);
1499 
1500 	/* Software capabilities */
1501 	ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
1502 				    SOF_TIMESTAMPING_SOFTWARE);
1503 	ts_info->phc_index = -1;
1504 
1505 	efx_ptp_get_ts_info(efx, ts_info);
1506 	return 0;
1507 }
1508 
1509 static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
1510 					 struct ethtool_eeprom *ee,
1511 					 u8 *data)
1512 {
1513 	struct efx_nic *efx = netdev_priv(net_dev);
1514 	int ret;
1515 
1516 	if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
1517 		return -EOPNOTSUPP;
1518 
1519 	mutex_lock(&efx->mac_lock);
1520 	ret = efx->phy_op->get_module_eeprom(efx, ee, data);
1521 	mutex_unlock(&efx->mac_lock);
1522 
1523 	return ret;
1524 }
1525 
1526 static int efx_ethtool_get_module_info(struct net_device *net_dev,
1527 				       struct ethtool_modinfo *modinfo)
1528 {
1529 	struct efx_nic *efx = netdev_priv(net_dev);
1530 	int ret;
1531 
1532 	if (!efx->phy_op || !efx->phy_op->get_module_info)
1533 		return -EOPNOTSUPP;
1534 
1535 	mutex_lock(&efx->mac_lock);
1536 	ret = efx->phy_op->get_module_info(efx, modinfo);
1537 	mutex_unlock(&efx->mac_lock);
1538 
1539 	return ret;
1540 }
1541 
1542 static int efx_ethtool_get_fecparam(struct net_device *net_dev,
1543 				    struct ethtool_fecparam *fecparam)
1544 {
1545 	struct efx_nic *efx = netdev_priv(net_dev);
1546 	int rc;
1547 
1548 	if (!efx->phy_op || !efx->phy_op->get_fecparam)
1549 		return -EOPNOTSUPP;
1550 	mutex_lock(&efx->mac_lock);
1551 	rc = efx->phy_op->get_fecparam(efx, fecparam);
1552 	mutex_unlock(&efx->mac_lock);
1553 
1554 	return rc;
1555 }
1556 
1557 static int efx_ethtool_set_fecparam(struct net_device *net_dev,
1558 				    struct ethtool_fecparam *fecparam)
1559 {
1560 	struct efx_nic *efx = netdev_priv(net_dev);
1561 	int rc;
1562 
1563 	if (!efx->phy_op || !efx->phy_op->get_fecparam)
1564 		return -EOPNOTSUPP;
1565 	mutex_lock(&efx->mac_lock);
1566 	rc = efx->phy_op->set_fecparam(efx, fecparam);
1567 	mutex_unlock(&efx->mac_lock);
1568 
1569 	return rc;
1570 }
1571 
/* Ethtool entry points for the sfc driver; handlers not visible in
 * this file section are implemented elsewhere in the driver.
 */
const struct ethtool_ops efx_ethtool_ops = {
	.get_drvinfo		= efx_ethtool_get_drvinfo,
	.get_regs_len		= efx_ethtool_get_regs_len,
	.get_regs		= efx_ethtool_get_regs,
	.get_msglevel		= efx_ethtool_get_msglevel,
	.set_msglevel		= efx_ethtool_set_msglevel,
	.nway_reset		= efx_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= efx_ethtool_get_coalesce,
	.set_coalesce		= efx_ethtool_set_coalesce,
	.get_ringparam		= efx_ethtool_get_ringparam,
	.set_ringparam		= efx_ethtool_set_ringparam,
	.get_pauseparam         = efx_ethtool_get_pauseparam,
	.set_pauseparam         = efx_ethtool_set_pauseparam,
	.get_sset_count		= efx_ethtool_get_sset_count,
	.self_test		= efx_ethtool_self_test,
	.get_strings		= efx_ethtool_get_strings,
	.set_phys_id		= efx_ethtool_phys_id,
	.get_ethtool_stats	= efx_ethtool_get_stats,
	.get_wol                = efx_ethtool_get_wol,
	.set_wol                = efx_ethtool_set_wol,
	.reset			= efx_ethtool_reset,
	.get_rxnfc		= efx_ethtool_get_rxnfc,
	.set_rxnfc		= efx_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= efx_ethtool_get_rxfh_indir_size,
	.get_rxfh_key_size	= efx_ethtool_get_rxfh_key_size,
	.get_rxfh		= efx_ethtool_get_rxfh,
	.set_rxfh		= efx_ethtool_set_rxfh,
	.get_rxfh_context	= efx_ethtool_get_rxfh_context,
	.set_rxfh_context	= efx_ethtool_set_rxfh_context,
	.get_ts_info		= efx_ethtool_get_ts_info,
	.get_module_info	= efx_ethtool_get_module_info,
	.get_module_eeprom	= efx_ethtool_get_module_eeprom,
	.get_link_ksettings	= efx_ethtool_get_link_ksettings,
	.set_link_ksettings	= efx_ethtool_set_link_ksettings,
	.get_fecparam		= efx_ethtool_get_fecparam,
	.set_fecparam		= efx_ethtool_set_fecparam,
};
1610