/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include "net_driver.h"
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"

struct ethtool_string {
	char name[ETH_GSTRING_LEN];
};

struct efx_ethtool_stat {
	const char *name;
	enum {
		EFX_ETHTOOL_STAT_SOURCE_mac_stats,
		EFX_ETHTOOL_STAT_SOURCE_nic,
		EFX_ETHTOOL_STAT_SOURCE_channel,
		EFX_ETHTOOL_STAT_SOURCE_tx_queue
	} source;
	unsigned offset;
	u64(*get_stat) (void *field); /* Reader function */
};

/* Initialiser for a struct #efx_ethtool_stat with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
				get_stat_function) {			\
	.name = #stat_name,						\
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,		\
	.offset = ((((field_type *) 0) ==				\
		      &((struct efx_##source_name *)0)->field) ?	\
		    offsetof(struct efx_##source_name, field) :		\
		    offsetof(struct efx_##source_name, field)),		\
	.get_stat = get_stat_function,					\
}

static u64 efx_get_uint_stat(void *field)
{
	return *(unsigned int *)field;
}

static u64 efx_get_u64_stat(void *field)
{
	return *(u64 *) field;
}

static u64 efx_get_atomic_stat(void *field)
{
	return atomic_read((atomic_t *) field);
}

#define EFX_ETHTOOL_U64_MAC_STAT(field)				\
	EFX_ETHTOOL_STAT(field, mac_stats, field,		\
			  u64, efx_get_u64_stat)

#define EFX_ETHTOOL_UINT_NIC_STAT(name)				\
	EFX_ETHTOOL_STAT(name, nic, n_##name,			\
			 unsigned int, efx_get_uint_stat)

#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
	EFX_ETHTOOL_STAT(field, nic, field,			\
			 atomic_t, efx_get_atomic_stat)

#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
			 unsigned int, efx_get_uint_stat)

#define EFX_ETHTOOL_UINT_TXQ_STAT(field)			\
	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,		\
			 unsigned int, efx_get_uint_stat)

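/* For illustration only: each entry in efx_ethtool_stats below is built
 * with one of the wrapper macros above.  For example,
 * EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc) expands to roughly:
 *
 *	{
 *		.name = "rx_frm_trunc",
 *		.source = EFX_ETHTOOL_STAT_SOURCE_channel,
 *		.offset = offsetof(struct efx_channel, n_rx_frm_trunc),
 *		.get_stat = efx_get_uint_stat,
 *	}
 *
 * The pointer comparison in EFX_ETHTOOL_STAT never changes the offset
 * (both arms of the conditional are identical); it exists only so the
 * compiler complains if the named field does not have the declared type.
 */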
static const struct efx_ethtool_stat efx_ethtool_stats[] = {
	EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
	EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
	EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
	EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
	EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
	EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
	EFX_ETHTOOL_U64_MAC_STAT(tx_control),
	EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
	EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
	EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
	EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
	EFX_ETHTOOL_U64_MAC_STAT(tx_64),
	EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
	EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
	EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
	EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
	EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
	EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
	EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
	EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
	EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
	EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
	EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
	EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
	EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
	EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
	EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
	EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
	EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
	EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
	EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
	EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
	EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
	EFX_ETHTOOL_U64_MAC_STAT(rx_good),
	EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
	EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
	EFX_ETHTOOL_U64_MAC_STAT(rx_control),
	EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
	EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
	EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
	EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
	EFX_ETHTOOL_U64_MAC_STAT(rx_64),
	EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
	EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
	EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
	EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
	EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
	EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
	EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
	EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
	EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
	EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
	EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
	EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
	EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
	EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
	EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
	EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
	EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
	EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
	EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
};

/* Number of ethtool statistics */
#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)

#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB

/**************************************************************************
 *
 * Ethtool operations
 *
 **************************************************************************
 */

/* Identify device by flashing LEDs */
static int efx_ethtool_phys_id(struct net_device *net_dev,
			       enum ethtool_phys_id_state state)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	enum efx_led_mode mode = EFX_LED_DEFAULT;

	switch (state) {
	case ETHTOOL_ID_ON:
		mode = EFX_LED_ON;
		break;
	case ETHTOOL_ID_OFF:
		mode = EFX_LED_OFF;
		break;
	case ETHTOOL_ID_INACTIVE:
		mode = EFX_LED_DEFAULT;
		break;
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */
	}

	efx->type->set_id_led(efx, mode);
	return 0;
}

/* This must be called with rtnl_lock held. */
static int efx_ethtool_get_settings(struct net_device *net_dev,
				    struct ethtool_cmd *ecmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_link_state *link_state = &efx->link_state;

	mutex_lock(&efx->mac_lock);
	efx->phy_op->get_settings(efx, ecmd);
	mutex_unlock(&efx->mac_lock);

	/* GMAC does not support 1000Mbps HD */
	ecmd->supported &= ~SUPPORTED_1000baseT_Half;
	/* Both MACs support pause frames (bidirectional and respond-only) */
	ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	if (LOOPBACK_INTERNAL(efx)) {
		ethtool_cmd_speed_set(ecmd, link_state->speed);
		ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}

/* This must be called with rtnl_lock held. */
static int efx_ethtool_set_settings(struct net_device *net_dev,
				    struct ethtool_cmd *ecmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	/* GMAC does not support 1000Mbps HD */
	if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
	    (ecmd->duplex != DUPLEX_FULL)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "rejecting unsupported 1000Mbps HD setting\n");
		return -EINVAL;
	}

	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->set_settings(efx, ecmd);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
				    struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_mcdi_print_fwver(efx, info->fw_version,
				     sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}

static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
	return efx_nic_get_regs_len(netdev_priv(net_dev));
}

static void efx_ethtool_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}

static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->msg_enable;
}

static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx->msg_enable = msg_enable;
}

/**
 * efx_fill_test - fill in an individual self-test entry
 * @test_index:		Index of the test
 * @strings:		Ethtool strings, or %NULL
 * @data:		Ethtool test results, or %NULL
 * @test:		Pointer to test result (used only if data != %NULL)
 * @unit_format:	Unit name format (e.g. "chan\%d")
 * @unit_id:		Unit id (e.g. 0 for "chan0")
 * @test_format:	Test name format (e.g. "loopback.\%s.tx_sent")
 * @test_id:		Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
 *
 * Fill in an individual self-test entry.
 */
static void efx_fill_test(unsigned int test_index,
			  struct ethtool_string *strings, u64 *data,
			  int *test, const char *unit_format, int unit_id,
			  const char *test_format, const char *test_id)
{
	struct ethtool_string unit_str, test_str;

	/* Fill data value, if applicable */
	if (data)
		data[test_index] = *test;

	/* Fill string, if applicable */
	if (strings) {
		if (strchr(unit_format, '%'))
			snprintf(unit_str.name, sizeof(unit_str.name),
				 unit_format, unit_id);
		else
			strcpy(unit_str.name, unit_format);
		snprintf(test_str.name, sizeof(test_str.name),
			 test_format, test_id);
		snprintf(strings[test_index].name,
			 sizeof(strings[test_index].name),
			 "%-6s %-24s", unit_str.name, test_str.name);
	}
}

#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EFX_LOOPBACK_NAME(_mode, _counter)			\
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
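
/* For illustration only: combined with the formats above, efx_fill_test()
 * produces ethtool test names such as
 *
 *	"chan0  eventq.dma"
 *	"txq1   loopback.PHYXS.tx_sent"
 *
 * i.e. a unit name padded to 6 characters followed by a test name padded
 * to 24 characters.
 */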

/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx:		Efx NIC
 * @lb_tests:		Efx loopback self-test results structure
 * @mode:		Loopback test mode
 * @test_index:		Starting index of the test
 * @strings:		Ethtool strings, or %NULL
 * @data:		Ethtool test results, or %NULL
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
				  struct efx_loopback_self_tests *lb_tests,
				  enum efx_loopback_mode mode,
				  unsigned int test_index,
				  struct ethtool_string *strings, u64 *data)
{
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_done"));
	}
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_good"));
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}

/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx:		Efx NIC
 * @tests:		Efx self-test results structure, or %NULL
 * @strings:		Ethtool strings, or %NULL
 * @data:		Ethtool test results, or %NULL
 */
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				       struct efx_self_tests *tests,
				       struct ethtool_string *strings,
				       u64 *data)
{
	struct efx_channel *channel;
	unsigned int n = 0, i;
	enum efx_loopback_mode mode;

	efx_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	efx_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	efx_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	efx_for_each_channel(channel, efx) {
		efx_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	efx_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	if (efx->phy_op->run_tests != NULL) {
		EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			efx_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = efx_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}

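/* Note: efx_ethtool_fill_self_tests() serves double duty.  Called with
 * %NULL tests/strings/data it simply counts the entries; the callers
 * below (get_sset_count, get_strings and self_test) rely on this so the
 * string table and the results array always have the same length.
 */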
static int efx_ethtool_get_sset_count(struct net_device *net_dev,
				      int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return EFX_ETHTOOL_NUM_STATS;
	case ETH_SS_TEST:
		return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
						   NULL, NULL, NULL);
	default:
		return -EINVAL;
	}
}

static void efx_ethtool_get_strings(struct net_device *net_dev,
				    u32 string_set, u8 *strings)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct ethtool_string *ethtool_strings =
		(struct ethtool_string *)strings;
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
			strlcpy(ethtool_strings[i].name,
				efx_ethtool_stats[i].name,
				sizeof(ethtool_strings[i].name));
		break;
	case ETH_SS_TEST:
		efx_ethtool_fill_self_tests(efx, NULL,
					    ethtool_strings, NULL);
		break;
	default:
		/* No other string sets */
		break;
	}
}

static void efx_ethtool_get_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	const struct efx_ethtool_stat *stat;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	int i;

	EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);

	spin_lock_bh(&efx->stats_lock);

	/* Update MAC and NIC statistics */
	efx->type->update_stats(efx);

	/* Fill detailed statistics buffer */
	for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
		stat = &efx_ethtool_stats[i];
		switch (stat->source) {
		case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
			data[i] = stat->get_stat((void *)mac_stats +
						 stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_channel:
			data[i] = 0;
			efx_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
			data[i] = 0;
			efx_for_each_channel(channel, efx) {
				efx_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}

	spin_unlock_bh(&efx->stats_lock);
}

static void efx_ethtool_self_test(struct net_device *net_dev,
				  struct ethtool_test *test, u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_self_tests *efx_tests;
	int already_up;
	int rc = -ENOMEM;

	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
	if (!efx_tests)
		goto fail;

	if (efx->state != STATE_READY) {
		rc = -EIO;
		goto fail1;
	}

	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

	/* We need rx buffers and interrupts. */
	already_up = (efx->net_dev->flags & IFF_UP);
	if (!already_up) {
		rc = dev_open(efx->net_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed opening device.\n");
			goto fail1;
		}
	}

	rc = efx_selftest(efx, efx_tests, test->flags);

	if (!already_up)
		dev_close(efx->net_dev);

	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
		   rc == 0 ? "passed" : "failed",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

fail1:
	/* Fill ethtool results structures */
	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
	kfree(efx_tests);
fail:
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}

/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return mdio45_nway_restart(&efx->mdio);
}

/*
 * Each channel has a single IRQ and moderation timer, started by any
 * completion (or other event).  Unless the module parameter
 * separate_tx_channels is set, IRQs and moderation are therefore
 * shared between RX and TX completions.  In this case, when RX IRQ
 * moderation is explicitly changed then TX IRQ moderation is
 * automatically changed too, but otherwise we fail if the two values
 * are requested to be different.
 *
 * The hardware does not support a limit on the number of completions
 * before an IRQ, so we do not use the max_frames fields.  We should
 * report and require that max_frames == (usecs != 0), but this would
 * invalidate existing user documentation.
 *
 * The hardware does not have distinct settings for interrupt
 * moderation while the previous IRQ is being handled, so we should
 * not use the 'irq' fields.  However, an earlier developer
 * misunderstood the meaning of the 'irq' fields and the driver did
 * not support the standard fields.  To avoid invalidating existing
 * user documentation, we report and accept changes through either the
 * standard or 'irq' fields.  If both are changed at the same time, we
 * prefer the standard field.
 *
 * We implement adaptive IRQ moderation, but use a different algorithm
 * from that assumed in the definition of struct ethtool_coalesce.
 * Therefore we do not use any of the adaptive moderation parameters
 * in it.
 */

static int efx_ethtool_get_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int tx_usecs, rx_usecs;
	bool rx_adaptive;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);

	coalesce->tx_coalesce_usecs = tx_usecs;
	coalesce->tx_coalesce_usecs_irq = tx_usecs;
	coalesce->rx_coalesce_usecs = rx_usecs;
	coalesce->rx_coalesce_usecs_irq = rx_usecs;
	coalesce->use_adaptive_rx_coalesce = rx_adaptive;

	return 0;
}

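/* A worked example of the "prefer the standard field" rule described
 * above, with hypothetical values: if the current RX moderation is 30us
 * and userland submits rx_coalesce_usecs = 60 together with
 * rx_coalesce_usecs_irq = 90, the standard field differs from the old
 * value, so 60us is applied and the 'irq' value is ignored; only when
 * rx_coalesce_usecs is left at 30 does rx_coalesce_usecs_irq take
 * effect.  The TX fields are handled the same way below.
 */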
static int efx_ethtool_set_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	unsigned int tx_usecs, rx_usecs;
	bool adaptive, rx_may_override_tx;
	int rc;

	if (coalesce->use_adaptive_tx_coalesce)
		return -EINVAL;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);

	if (coalesce->rx_coalesce_usecs != rx_usecs)
		rx_usecs = coalesce->rx_coalesce_usecs;
	else
		rx_usecs = coalesce->rx_coalesce_usecs_irq;

	adaptive = coalesce->use_adaptive_rx_coalesce;

	/* If channels are shared, TX IRQ moderation can be quietly
	 * overridden unless it is changed from its old value.
	 */
	rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
			      coalesce->tx_coalesce_usecs_irq == tx_usecs);
	if (coalesce->tx_coalesce_usecs != tx_usecs)
		tx_usecs = coalesce->tx_coalesce_usecs;
	else
		tx_usecs = coalesce->tx_coalesce_usecs_irq;

	rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
				     rx_may_override_tx);
	if (rc != 0)
		return rc;

	efx_for_each_channel(channel, efx)
		efx->type->push_irq_moderation(channel);

	return 0;
}

static void efx_ethtool_get_ringparam(struct net_device *net_dev,
				      struct ethtool_ringparam *ring)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
	ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
	ring->rx_pending = efx->rxq_entries;
	ring->tx_pending = efx->txq_entries;
}

static int efx_ethtool_set_ringparam(struct net_device *net_dev,
				     struct ethtool_ringparam *ring)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u32 txq_entries;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
	    ring->tx_pending > EFX_MAX_DMAQ_SIZE)
		return -EINVAL;

	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
		netif_err(efx, drv, efx->net_dev,
			  "RX queues cannot be smaller than %u\n",
			  EFX_RXQ_MIN_ENT);
		return -EINVAL;
	}

	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
	if (txq_entries != ring->tx_pending)
		netif_warn(efx, drv, efx->net_dev,
			   "increasing TX queue size to minimum of %u\n",
			   txq_entries);

	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
}

static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
				      struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	bool reset;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
		     (pause->tx_pause ? EFX_FC_TX : 0) |
		     (pause->autoneg ? EFX_FC_AUTO : 0));

	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* TX flow control may automatically turn itself off if the
	 * link partner (intermittently) stops responding to pause
	 * frames. There isn't any indication that this has happened,
	 * so the best we can do is leave it up to the user to spot this
	 * and fix it by cycling transmit flow control on this end. */
	reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
	if (EFX_WORKAROUND_11482(efx) && reset) {
		if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
			/* Recover by resetting the EM block */
			falcon_stop_nic_stats(efx);
			falcon_drain_tx_fifo(efx);
			falcon_reconfigure_xmac(efx);
			falcon_start_nic_stats(efx);
		} else {
			/* Schedule a reset to recover */
			efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
		}
	}

	old_adv = efx->link_advertising;
	old_fc = efx->wanted_fc;
	efx_link_set_wanted_fc(efx, wanted_fc);
	if (efx->link_advertising != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
		rc = efx->phy_op->reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC. The PHY *may* generate a link state change event
	 * if the user just changed the advertised capabilities, but there's no
	 * harm in doing this twice. */
	efx->type->reconfigure_mac(efx);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}

static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
				       struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
	pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
	pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}


static void efx_ethtool_get_wol(struct net_device *net_dev,
				struct ethtool_wolinfo *wol)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx->type->get_wol(efx, wol);
}


static int efx_ethtool_set_wol(struct net_device *net_dev,
			       struct ethtool_wolinfo *wol)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->type->set_wol(efx, wol->wolopts);
}

static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx->type->map_reset_flags(flags);
	if (rc < 0)
		return rc;

	return efx_reset(efx, rc);
}

/* MAC address mask including only MC flag */
static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };

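/* Note on the ETHER_FLOW encoding used below: the "default" unicast and
 * multicast filters are represented to ethtool with a destination MAC
 * mask of 01:00:00:00:00:00 (only the multicast bit significant).  With
 * that mask, an all-zero destination selects the default unicast filter
 * and a destination with the multicast bit set selects the default
 * multicast filter; a full ff:ff:ff:ff:ff:ff mask matches one specific
 * local MAC address instead.  efx_ethtool_set_class_rule() interprets
 * rules from userland using the same convention.
 */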
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	u16 vid;
	u8 proto;
	int rc;

	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
					rule->location, &spec);
	if (rc)
		return rc;

	if (spec.dmaq_id == 0xfff)
		rule->ring_cookie = RX_CLS_FLOW_DISC;
	else
		rule->ring_cookie = spec.dmaq_id;

	if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
		rule->flow_type = ETHER_FLOW;
		memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
		if (spec.type == EFX_FILTER_MC_DEF)
			memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
		return 0;
	}

	rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
	if (rc == 0) {
		rule->flow_type = ETHER_FLOW;
		memset(mac_mask->h_dest, ~0, ETH_ALEN);
		if (vid != EFX_FILTER_VID_UNSPEC) {
			rule->flow_type |= FLOW_EXT;
			rule->h_ext.vlan_tci = htons(vid);
			rule->m_ext.vlan_tci = htons(0xfff);
		}
		return 0;
	}

	rc = efx_filter_get_ipv4_local(&spec, &proto,
				       &ip_entry->ip4dst, &ip_entry->pdst);
	if (rc != 0) {
		rc = efx_filter_get_ipv4_full(
			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
			&ip_entry->ip4src, &ip_entry->psrc);
		EFX_WARN_ON_PARANOID(rc);
		ip_mask->ip4src = ~0;
		ip_mask->psrc = ~0;
	}
	rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
	ip_mask->ip4dst = ~0;
	ip_mask->pdst = ~0;
	return rc;
}

static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
		      struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
		unsigned min_revision = 0;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			min_revision = EFX_REV_FALCON_B0;
			break;
		case TCP_V6_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			min_revision = EFX_REV_SIENA_A0;
			break;
		default:
			break;
		}
		if (efx_nic_rev(efx) < min_revision)
			info->data = 0;
		return 0;
	}

	case ETHTOOL_GRXCLSRLCNT:
		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		info->data |= RX_CLS_LOC_SPECIAL;
		info->rule_cnt =
			efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
		return 0;

	case ETHTOOL_GRXCLSRULE:
		if (efx_filter_get_rx_id_limit(efx) == 0)
			return -EOPNOTSUPP;
		return efx_ethtool_get_class_rule(efx, &info->fs);

	case ETHTOOL_GRXCLSRLALL: {
		s32 rc;
		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
					   rule_locs, info->rule_cnt);
		if (rc < 0)
			return rc;
		info->rule_cnt = rc;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}

static int efx_ethtool_set_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	int rc;

	/* Check that user wants us to choose the location */
	if (rule->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	/* Range-check ring_cookie */
	if (rule->ring_cookie >= efx->n_rx_channels &&
	    rule->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	/* Check for unsupported extensions */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
	     rule->m_ext.data[1]))
		return -EINVAL;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   0xfff : rule->ring_cookie);

	switch (rule->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		u8 proto = (rule->flow_type == TCP_V4_FLOW ?
			    IPPROTO_TCP : IPPROTO_UDP);

		/* Must match all of destination, */
		if ((__force u32)~ip_mask->ip4dst |
		    (__force u16)~ip_mask->pdst)
			return -EINVAL;
		/* all or none of source, */
		if ((ip_mask->ip4src | ip_mask->psrc) &&
		    ((__force u32)~ip_mask->ip4src |
		     (__force u16)~ip_mask->psrc))
			return -EINVAL;
		/* and nothing else */
		if (ip_mask->tos | rule->m_ext.vlan_tci)
			return -EINVAL;

		if (ip_mask->ip4src)
			rc = efx_filter_set_ipv4_full(&spec, proto,
						      ip_entry->ip4dst,
						      ip_entry->pdst,
						      ip_entry->ip4src,
						      ip_entry->psrc);
		else
			rc = efx_filter_set_ipv4_local(&spec, proto,
						       ip_entry->ip4dst,
						       ip_entry->pdst);
		if (rc)
			return rc;
		break;
	}

	case ETHER_FLOW | FLOW_EXT:
	case ETHER_FLOW: {
		u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
				     ntohs(rule->m_ext.vlan_tci) : 0);

		/* Must not match on source address or Ethertype */
		if (!is_zero_ether_addr(mac_mask->h_source) ||
		    mac_mask->h_proto)
			return -EINVAL;

		/* Is it a default UC or MC filter? */
		if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
		    vlan_tag_mask == 0) {
			if (is_multicast_ether_addr(mac_entry->h_dest))
				rc = efx_filter_set_mc_def(&spec);
			else
				rc = efx_filter_set_uc_def(&spec);
		}
		/* Otherwise, it must match all of destination and all
		 * or none of VID.
		 */
		else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
			 (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
			rc = efx_filter_set_eth_local(
				&spec,
				vlan_tag_mask ?
				ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
				mac_entry->h_dest);
		} else {
			rc = -EINVAL;
		}
		if (rc)
			return rc;
		break;
	}

	default:
		return -EINVAL;
	}

	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	rule->location = rc;
	return 0;
}

static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
				 struct ethtool_rxnfc *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx_filter_get_rx_id_limit(efx) == 0)
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		return efx_ethtool_set_class_rule(efx, &info->fs);

	case ETHTOOL_SRXCLSRLDEL:
		return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
						 info->fs.location);

	default:
		return -EOPNOTSUPP;
	}
}

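/* Reporting an indirection table size of 0 is treated by the ethtool
 * core as "no RSS indirection table", so the get/set operations below
 * are effectively disabled on pre-Falcon-B0 hardware and when only a
 * single RX channel is in use, where there is nothing useful to
 * redirect.
 */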
static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
		 efx->n_rx_channels == 1) ?
		0 : ARRAY_SIZE(efx->rx_indir_table));
}

static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
	return 0;
}

static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
				      const u32 *indir)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
	efx_nic_push_rx_indir_table(efx);
	return 0;
}

static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
					 struct ethtool_eeprom *ee,
					 u8 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_eeprom(efx, ee, data);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

static int efx_ethtool_get_module_info(struct net_device *net_dev,
				       struct ethtool_modinfo *modinfo)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_info)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_info(efx, modinfo);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

const struct ethtool_ops efx_ethtool_ops = {
	.get_settings		= efx_ethtool_get_settings,
	.set_settings		= efx_ethtool_set_settings,
	.get_drvinfo		= efx_ethtool_get_drvinfo,
	.get_regs_len		= efx_ethtool_get_regs_len,
	.get_regs		= efx_ethtool_get_regs,
	.get_msglevel		= efx_ethtool_get_msglevel,
	.set_msglevel		= efx_ethtool_set_msglevel,
	.nway_reset		= efx_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= efx_ethtool_get_coalesce,
	.set_coalesce		= efx_ethtool_set_coalesce,
	.get_ringparam		= efx_ethtool_get_ringparam,
	.set_ringparam		= efx_ethtool_set_ringparam,
	.get_pauseparam         = efx_ethtool_get_pauseparam,
	.set_pauseparam         = efx_ethtool_set_pauseparam,
	.get_sset_count		= efx_ethtool_get_sset_count,
	.self_test		= efx_ethtool_self_test,
	.get_strings		= efx_ethtool_get_strings,
	.set_phys_id		= efx_ethtool_phys_id,
	.get_ethtool_stats	= efx_ethtool_get_stats,
	.get_wol                = efx_ethtool_get_wol,
	.set_wol                = efx_ethtool_set_wol,
	.reset			= efx_ethtool_reset,
	.get_rxnfc		= efx_ethtool_get_rxnfc,
	.set_rxnfc		= efx_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= efx_ethtool_get_rxfh_indir_size,
	.get_rxfh_indir		= efx_ethtool_get_rxfh_indir,
	.set_rxfh_indir		= efx_ethtool_set_rxfh_indir,
	.get_ts_info		= efx_ptp_get_ts_info,
	.get_module_info	= efx_ethtool_get_module_info,
	.get_module_eeprom	= efx_ethtool_get_module_eeprom,
};