1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3 
4 /* ethtool support for ixgbe */
5 
6 #include <linux/interrupt.h>
7 #include <linux/types.h>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/pci.h>
11 #include <linux/netdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/vmalloc.h>
14 #include <linux/highmem.h>
15 #include <linux/uaccess.h>
16 
17 #include "ixgbe.h"
18 #include "ixgbe_phy.h"
19 
20 
21 #define IXGBE_ALL_RAR_ENTRIES 16
22 
23 enum {NETDEV_STATS, IXGBE_STATS};
24 
25 struct ixgbe_stats {
26 	char stat_string[ETH_GSTRING_LEN];
27 	int type;
28 	int sizeof_stat;
29 	int stat_offset;
30 };
31 
32 #define IXGBE_STAT(m)		IXGBE_STATS, \
33 				sizeof(((struct ixgbe_adapter *)0)->m), \
34 				offsetof(struct ixgbe_adapter, m)
35 #define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
36 				sizeof(((struct rtnl_link_stats64 *)0)->m), \
37 				offsetof(struct rtnl_link_stats64, m)
38 
39 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
40 	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
41 	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
42 	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
43 	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
44 	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
45 	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
46 	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
47 	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
48 	{"lsc_int", IXGBE_STAT(lsc_int)},
49 	{"tx_busy", IXGBE_STAT(tx_busy)},
50 	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
51 	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
52 	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
53 	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
54 	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
55 	{"multicast", IXGBE_NETDEV_STAT(multicast)},
56 	{"broadcast", IXGBE_STAT(stats.bprc)},
57 	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
58 	{"collisions", IXGBE_NETDEV_STAT(collisions)},
59 	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
60 	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
61 	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
62 	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
63 	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
64 	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
65 	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
66 	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
67 	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
68 	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
69 	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
70 	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
71 	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
72 	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
73 	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
74 	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
75 	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
76 	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
77 	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
78 	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
79 	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
80 	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
81 	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
82 	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
83 	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
84 	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
85 	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
86 	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
87 	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
88 	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
89 	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
90 	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
91 	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
92 	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
93 	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
94 	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
95 	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
96 #ifdef IXGBE_FCOE
97 	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
98 	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
99 	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
100 	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
101 	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
102 	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
103 	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
104 	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
105 #endif /* IXGBE_FCOE */
106 };
107 
108 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
109  * we set the num_rx_queues to evaluate to num_tx_queues. This is
110  * used because we do not have a good way to get the max number of
111  * rx queues with CONFIG_RPS disabled.
112  */
113 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
114 
115 #define IXGBE_QUEUE_STATS_LEN ( \
116 	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
117 	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
118 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
119 #define IXGBE_PB_STATS_LEN ( \
120 			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
121 			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
122 			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
123 			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
124 			/ sizeof(u64))
125 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
126 			 IXGBE_PB_STATS_LEN + \
127 			 IXGBE_QUEUE_STATS_LEN)
128 
129 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
130 	"Register test  (offline)", "Eeprom test    (offline)",
131 	"Interrupt test (offline)", "Loopback test  (offline)",
132 	"Link test   (on/offline)"
133 };
134 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
135 
136 static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
137 #define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
138 	"legacy-rx",
139 #define IXGBE_PRIV_FLAGS_VF_IPSEC_EN	BIT(1)
140 	"vf-ipsec",
141 };
142 
143 #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
144 
145 #define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
146 
147 static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
148 					 struct ethtool_link_ksettings *cmd)
149 {
150 	if (!ixgbe_isbackplane(hw->phy.media_type)) {
151 		ethtool_link_ksettings_add_link_mode(cmd, supported,
152 						     10000baseT_Full);
153 		return;
154 	}
155 
156 	switch (hw->device_id) {
157 	case IXGBE_DEV_ID_82598:
158 	case IXGBE_DEV_ID_82599_KX4:
159 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
160 	case IXGBE_DEV_ID_X550EM_X_KX4:
161 		ethtool_link_ksettings_add_link_mode
162 			(cmd, supported, 10000baseKX4_Full);
163 		break;
164 	case IXGBE_DEV_ID_82598_BX:
165 	case IXGBE_DEV_ID_82599_KR:
166 	case IXGBE_DEV_ID_X550EM_X_KR:
167 	case IXGBE_DEV_ID_X550EM_X_XFI:
168 		ethtool_link_ksettings_add_link_mode
169 			(cmd, supported, 10000baseKR_Full);
170 		break;
171 	default:
172 		ethtool_link_ksettings_add_link_mode
173 			(cmd, supported, 10000baseKX4_Full);
174 		ethtool_link_ksettings_add_link_mode
175 			(cmd, supported, 10000baseKR_Full);
176 		break;
177 	}
178 }
179 
180 static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
181 					   struct ethtool_link_ksettings *cmd)
182 {
183 	if (!ixgbe_isbackplane(hw->phy.media_type)) {
184 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
185 						     10000baseT_Full);
186 		return;
187 	}
188 
189 	switch (hw->device_id) {
190 	case IXGBE_DEV_ID_82598:
191 	case IXGBE_DEV_ID_82599_KX4:
192 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
193 	case IXGBE_DEV_ID_X550EM_X_KX4:
194 		ethtool_link_ksettings_add_link_mode
195 			(cmd, advertising, 10000baseKX4_Full);
196 		break;
197 	case IXGBE_DEV_ID_82598_BX:
198 	case IXGBE_DEV_ID_82599_KR:
199 	case IXGBE_DEV_ID_X550EM_X_KR:
200 	case IXGBE_DEV_ID_X550EM_X_XFI:
201 		ethtool_link_ksettings_add_link_mode
202 			(cmd, advertising, 10000baseKR_Full);
203 		break;
204 	default:
205 		ethtool_link_ksettings_add_link_mode
206 			(cmd, advertising, 10000baseKX4_Full);
207 		ethtool_link_ksettings_add_link_mode
208 			(cmd, advertising, 10000baseKR_Full);
209 		break;
210 	}
211 }
212 
213 static int ixgbe_get_link_ksettings(struct net_device *netdev,
214 				    struct ethtool_link_ksettings *cmd)
215 {
216 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
217 	struct ixgbe_hw *hw = &adapter->hw;
218 	ixgbe_link_speed supported_link;
219 	bool autoneg = false;
220 
221 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
222 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
223 
224 	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
225 
226 	/* set the supported link speeds */
227 	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
228 		ixgbe_set_supported_10gtypes(hw, cmd);
229 		ixgbe_set_advertising_10gtypes(hw, cmd);
230 	}
231 	if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
232 		ethtool_link_ksettings_add_link_mode(cmd, supported,
233 						     5000baseT_Full);
234 
235 	if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
236 		ethtool_link_ksettings_add_link_mode(cmd, supported,
237 						     2500baseT_Full);
238 
239 	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
240 		if (ixgbe_isbackplane(hw->phy.media_type)) {
241 			ethtool_link_ksettings_add_link_mode(cmd, supported,
242 							     1000baseKX_Full);
243 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
244 							     1000baseKX_Full);
245 		} else {
246 			ethtool_link_ksettings_add_link_mode(cmd, supported,
247 							     1000baseT_Full);
248 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
249 							     1000baseT_Full);
250 		}
251 	}
252 	if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
253 		ethtool_link_ksettings_add_link_mode(cmd, supported,
254 						     100baseT_Full);
255 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
256 						     100baseT_Full);
257 	}
258 	if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
259 		ethtool_link_ksettings_add_link_mode(cmd, supported,
260 						     10baseT_Full);
261 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
262 						     10baseT_Full);
263 	}
264 
265 	/* set the advertised speeds */
266 	if (hw->phy.autoneg_advertised) {
267 		ethtool_link_ksettings_zero_link_mode(cmd, advertising);
268 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
269 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
270 							     10baseT_Full);
271 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
272 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
273 							     100baseT_Full);
274 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
275 			ixgbe_set_advertising_10gtypes(hw, cmd);
276 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
277 			if (ethtool_link_ksettings_test_link_mode
278 				(cmd, supported, 1000baseKX_Full))
279 				ethtool_link_ksettings_add_link_mode
280 					(cmd, advertising, 1000baseKX_Full);
281 			else
282 				ethtool_link_ksettings_add_link_mode
283 					(cmd, advertising, 1000baseT_Full);
284 		}
285 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
286 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
287 							     5000baseT_Full);
288 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
289 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
290 							     2500baseT_Full);
291 	} else {
292 		if (hw->phy.multispeed_fiber && !autoneg) {
293 			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
294 				ethtool_link_ksettings_add_link_mode
295 					(cmd, advertising, 10000baseT_Full);
296 		}
297 	}
298 
299 	if (autoneg) {
300 		ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
301 		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
302 		cmd->base.autoneg = AUTONEG_ENABLE;
303 	} else
304 		cmd->base.autoneg = AUTONEG_DISABLE;
305 
306 	/* Determine the remaining settings based on the PHY type. */
307 	switch (adapter->hw.phy.type) {
308 	case ixgbe_phy_tn:
309 	case ixgbe_phy_aq:
310 	case ixgbe_phy_x550em_ext_t:
311 	case ixgbe_phy_fw:
312 	case ixgbe_phy_cu_unknown:
313 		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
314 		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
315 		cmd->base.port = PORT_TP;
316 		break;
317 	case ixgbe_phy_qt:
318 		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
319 		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
320 		cmd->base.port = PORT_FIBRE;
321 		break;
322 	case ixgbe_phy_nl:
323 	case ixgbe_phy_sfp_passive_tyco:
324 	case ixgbe_phy_sfp_passive_unknown:
325 	case ixgbe_phy_sfp_ftl:
326 	case ixgbe_phy_sfp_avago:
327 	case ixgbe_phy_sfp_intel:
328 	case ixgbe_phy_sfp_unknown:
329 	case ixgbe_phy_qsfp_passive_unknown:
330 	case ixgbe_phy_qsfp_active_unknown:
331 	case ixgbe_phy_qsfp_intel:
332 	case ixgbe_phy_qsfp_unknown:
333 		/* SFP+ devices, further checking needed */
334 		switch (adapter->hw.phy.sfp_type) {
335 		case ixgbe_sfp_type_da_cu:
336 		case ixgbe_sfp_type_da_cu_core0:
337 		case ixgbe_sfp_type_da_cu_core1:
338 			ethtool_link_ksettings_add_link_mode(cmd, supported,
339 							     FIBRE);
340 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
341 							     FIBRE);
342 			cmd->base.port = PORT_DA;
343 			break;
344 		case ixgbe_sfp_type_sr:
345 		case ixgbe_sfp_type_lr:
346 		case ixgbe_sfp_type_srlr_core0:
347 		case ixgbe_sfp_type_srlr_core1:
348 		case ixgbe_sfp_type_1g_sx_core0:
349 		case ixgbe_sfp_type_1g_sx_core1:
350 		case ixgbe_sfp_type_1g_lx_core0:
351 		case ixgbe_sfp_type_1g_lx_core1:
352 			ethtool_link_ksettings_add_link_mode(cmd, supported,
353 							     FIBRE);
354 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
355 							     FIBRE);
356 			cmd->base.port = PORT_FIBRE;
357 			break;
358 		case ixgbe_sfp_type_not_present:
359 			ethtool_link_ksettings_add_link_mode(cmd, supported,
360 							     FIBRE);
361 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
362 							     FIBRE);
363 			cmd->base.port = PORT_NONE;
364 			break;
365 		case ixgbe_sfp_type_1g_cu_core0:
366 		case ixgbe_sfp_type_1g_cu_core1:
367 			ethtool_link_ksettings_add_link_mode(cmd, supported,
368 							     TP);
369 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
370 							     TP);
371 			cmd->base.port = PORT_TP;
372 			break;
373 		case ixgbe_sfp_type_unknown:
374 		default:
375 			ethtool_link_ksettings_add_link_mode(cmd, supported,
376 							     FIBRE);
377 			ethtool_link_ksettings_add_link_mode(cmd, advertising,
378 							     FIBRE);
379 			cmd->base.port = PORT_OTHER;
380 			break;
381 		}
382 		break;
383 	case ixgbe_phy_xaui:
384 		ethtool_link_ksettings_add_link_mode(cmd, supported,
385 						     FIBRE);
386 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
387 						     FIBRE);
388 		cmd->base.port = PORT_NONE;
389 		break;
390 	case ixgbe_phy_unknown:
391 	case ixgbe_phy_generic:
392 	case ixgbe_phy_sfp_unsupported:
393 	default:
394 		ethtool_link_ksettings_add_link_mode(cmd, supported,
395 						     FIBRE);
396 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
397 						     FIBRE);
398 		cmd->base.port = PORT_OTHER;
399 		break;
400 	}
401 
402 	/* Indicate pause support */
403 	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
404 
405 	switch (hw->fc.requested_mode) {
406 	case ixgbe_fc_full:
407 		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
408 		break;
409 	case ixgbe_fc_rx_pause:
410 		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
411 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
412 						     Asym_Pause);
413 		break;
414 	case ixgbe_fc_tx_pause:
415 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
416 						     Asym_Pause);
417 		break;
418 	default:
419 		ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
420 		ethtool_link_ksettings_del_link_mode(cmd, advertising,
421 						     Asym_Pause);
422 	}
423 
424 	if (netif_carrier_ok(netdev)) {
425 		switch (adapter->link_speed) {
426 		case IXGBE_LINK_SPEED_10GB_FULL:
427 			cmd->base.speed = SPEED_10000;
428 			break;
429 		case IXGBE_LINK_SPEED_5GB_FULL:
430 			cmd->base.speed = SPEED_5000;
431 			break;
432 		case IXGBE_LINK_SPEED_2_5GB_FULL:
433 			cmd->base.speed = SPEED_2500;
434 			break;
435 		case IXGBE_LINK_SPEED_1GB_FULL:
436 			cmd->base.speed = SPEED_1000;
437 			break;
438 		case IXGBE_LINK_SPEED_100_FULL:
439 			cmd->base.speed = SPEED_100;
440 			break;
441 		case IXGBE_LINK_SPEED_10_FULL:
442 			cmd->base.speed = SPEED_10;
443 			break;
444 		default:
445 			break;
446 		}
447 		cmd->base.duplex = DUPLEX_FULL;
448 	} else {
449 		cmd->base.speed = SPEED_UNKNOWN;
450 		cmd->base.duplex = DUPLEX_UNKNOWN;
451 	}
452 
453 	return 0;
454 }
455 
456 static int ixgbe_set_link_ksettings(struct net_device *netdev,
457 				    const struct ethtool_link_ksettings *cmd)
458 {
459 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
460 	struct ixgbe_hw *hw = &adapter->hw;
461 	u32 advertised, old;
462 	s32 err = 0;
463 
464 	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
465 	    (hw->phy.multispeed_fiber)) {
466 		/*
467 		 * this function does not support duplex forcing, but can
468 		 * limit the advertising of the adapter to the specified speed
469 		 */
470 		if (!bitmap_subset(cmd->link_modes.advertising,
471 				   cmd->link_modes.supported,
472 				   __ETHTOOL_LINK_MODE_MASK_NBITS))
473 			return -EINVAL;
474 
475 		/* only allow one speed at a time if no autoneg */
476 		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
477 			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
478 								  10000baseT_Full) &&
479 			    ethtool_link_ksettings_test_link_mode(cmd, advertising,
480 								  1000baseT_Full))
481 				return -EINVAL;
482 		}
483 
484 		old = hw->phy.autoneg_advertised;
485 		advertised = 0;
486 		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
487 							  10000baseT_Full))
488 			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
489 		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
490 							  5000baseT_Full))
491 			advertised |= IXGBE_LINK_SPEED_5GB_FULL;
492 		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
493 							  2500baseT_Full))
494 			advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
495 		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
496 							  1000baseT_Full))
497 			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
498 
499 		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
500 							  100baseT_Full))
501 			advertised |= IXGBE_LINK_SPEED_100_FULL;
502 
503 		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
504 							  10baseT_Full))
505 			advertised |= IXGBE_LINK_SPEED_10_FULL;
506 
507 		if (old == advertised)
508 			return err;
509 		/* this sets the link speed and restarts auto-neg */
510 		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
511 			usleep_range(1000, 2000);
512 
513 		hw->mac.autotry_restart = true;
514 		err = hw->mac.ops.setup_link(hw, advertised, true);
515 		if (err) {
516 			e_info(probe, "setup link failed with code %d\n", err);
517 			hw->mac.ops.setup_link(hw, old, true);
518 		}
519 		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
520 	} else {
521 		/* in this case we currently only support 10Gb/FULL */
522 		u32 speed = cmd->base.speed;
523 
524 		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
525 		    (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
526 							    10000baseT_Full)) ||
527 		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
528 			return -EINVAL;
529 	}
530 
531 	return err;
532 }
533 
534 static void ixgbe_get_pause_stats(struct net_device *netdev,
535 				  struct ethtool_pause_stats *stats)
536 {
537 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
538 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
539 
540 	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
541 	stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
542 }
543 
544 static void ixgbe_get_pauseparam(struct net_device *netdev,
545 				 struct ethtool_pauseparam *pause)
546 {
547 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
548 	struct ixgbe_hw *hw = &adapter->hw;
549 
550 	if (ixgbe_device_supports_autoneg_fc(hw) &&
551 	    !hw->fc.disable_fc_autoneg)
552 		pause->autoneg = 1;
553 	else
554 		pause->autoneg = 0;
555 
556 	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
557 		pause->rx_pause = 1;
558 	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
559 		pause->tx_pause = 1;
560 	} else if (hw->fc.current_mode == ixgbe_fc_full) {
561 		pause->rx_pause = 1;
562 		pause->tx_pause = 1;
563 	}
564 }
565 
566 static int ixgbe_set_pauseparam(struct net_device *netdev,
567 				struct ethtool_pauseparam *pause)
568 {
569 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
570 	struct ixgbe_hw *hw = &adapter->hw;
571 	struct ixgbe_fc_info fc = hw->fc;
572 
573 	/* 82598 does no support link flow control with DCB enabled */
574 	if ((hw->mac.type == ixgbe_mac_82598EB) &&
575 	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
576 		return -EINVAL;
577 
578 	/* some devices do not support autoneg of link flow control */
579 	if ((pause->autoneg == AUTONEG_ENABLE) &&
580 	    !ixgbe_device_supports_autoneg_fc(hw))
581 		return -EINVAL;
582 
583 	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
584 
585 	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
586 		fc.requested_mode = ixgbe_fc_full;
587 	else if (pause->rx_pause && !pause->tx_pause)
588 		fc.requested_mode = ixgbe_fc_rx_pause;
589 	else if (!pause->rx_pause && pause->tx_pause)
590 		fc.requested_mode = ixgbe_fc_tx_pause;
591 	else
592 		fc.requested_mode = ixgbe_fc_none;
593 
594 	/* if the thing changed then we'll update and use new autoneg */
595 	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
596 		hw->fc = fc;
597 		if (netif_running(netdev))
598 			ixgbe_reinit_locked(adapter);
599 		else
600 			ixgbe_reset(adapter);
601 	}
602 
603 	return 0;
604 }
605 
606 static u32 ixgbe_get_msglevel(struct net_device *netdev)
607 {
608 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
609 	return adapter->msg_enable;
610 }
611 
612 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
613 {
614 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
615 	adapter->msg_enable = data;
616 }
617 
618 static int ixgbe_get_regs_len(struct net_device *netdev)
619 {
620 #define IXGBE_REGS_LEN  1145
621 	return IXGBE_REGS_LEN * sizeof(u32);
622 }
623 
624 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
625 
626 static void ixgbe_get_regs(struct net_device *netdev,
627 			   struct ethtool_regs *regs, void *p)
628 {
629 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
630 	struct ixgbe_hw *hw = &adapter->hw;
631 	u32 *regs_buff = p;
632 	u8 i;
633 
634 	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
635 
636 	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
637 			hw->device_id;
638 
639 	/* General Registers */
640 	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
641 	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
642 	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
643 	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
644 	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
645 	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
646 	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
647 	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
648 
649 	/* NVM Register */
650 	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
651 	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
652 	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
653 	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
654 	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
655 	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
656 	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
657 	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
658 	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
659 	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
660 
661 	/* Interrupt */
662 	/* don't read EICR because it can clear interrupt causes, instead
663 	 * read EICS which is a shadow but doesn't clear EICR */
664 	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
665 	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
666 	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
667 	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
668 	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
669 	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
670 	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
671 	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
672 	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
673 	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
674 	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
675 	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
676 
677 	/* Flow Control */
678 	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
679 	for (i = 0; i < 4; i++)
680 		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
681 	for (i = 0; i < 8; i++) {
682 		switch (hw->mac.type) {
683 		case ixgbe_mac_82598EB:
684 			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
685 			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
686 			break;
687 		case ixgbe_mac_82599EB:
688 		case ixgbe_mac_X540:
689 		case ixgbe_mac_X550:
690 		case ixgbe_mac_X550EM_x:
691 		case ixgbe_mac_x550em_a:
692 			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
693 			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
694 			break;
695 		default:
696 			break;
697 		}
698 	}
699 	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
700 	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
701 
702 	/* Receive DMA */
703 	for (i = 0; i < 64; i++)
704 		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
705 	for (i = 0; i < 64; i++)
706 		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
707 	for (i = 0; i < 64; i++)
708 		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
709 	for (i = 0; i < 64; i++)
710 		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
711 	for (i = 0; i < 64; i++)
712 		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
713 	for (i = 0; i < 64; i++)
714 		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
715 	for (i = 0; i < 16; i++)
716 		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
717 	for (i = 0; i < 16; i++)
718 		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
719 	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
720 	for (i = 0; i < 8; i++)
721 		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
722 	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
723 	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
724 
725 	/* Receive */
726 	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
727 	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
728 	for (i = 0; i < 16; i++)
729 		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
730 	for (i = 0; i < 16; i++)
731 		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
732 	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
733 	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
734 	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
735 	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
736 	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
737 	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
738 	for (i = 0; i < 8; i++)
739 		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
740 	for (i = 0; i < 8; i++)
741 		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
742 	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
743 
744 	/* Transmit */
745 	for (i = 0; i < 32; i++)
746 		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
747 	for (i = 0; i < 32; i++)
748 		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
749 	for (i = 0; i < 32; i++)
750 		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
751 	for (i = 0; i < 32; i++)
752 		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
753 	for (i = 0; i < 32; i++)
754 		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
755 	for (i = 0; i < 32; i++)
756 		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
757 	for (i = 0; i < 32; i++)
758 		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
759 	for (i = 0; i < 32; i++)
760 		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
761 	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
762 	for (i = 0; i < 16; i++)
763 		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
764 	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
765 	for (i = 0; i < 8; i++)
766 		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
767 	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
768 
769 	/* Wake Up */
770 	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
771 	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
772 	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
773 	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
774 	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
775 	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
776 	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
777 	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
778 	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
779 
780 	/* DCB */
781 	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
782 	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
783 
784 	switch (hw->mac.type) {
785 	case ixgbe_mac_82598EB:
786 		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
787 		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
788 		for (i = 0; i < 8; i++)
789 			regs_buff[833 + i] =
790 				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
791 		for (i = 0; i < 8; i++)
792 			regs_buff[841 + i] =
793 				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
794 		for (i = 0; i < 8; i++)
795 			regs_buff[849 + i] =
796 				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
797 		for (i = 0; i < 8; i++)
798 			regs_buff[857 + i] =
799 				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
800 		break;
801 	case ixgbe_mac_82599EB:
802 	case ixgbe_mac_X540:
803 	case ixgbe_mac_X550:
804 	case ixgbe_mac_X550EM_x:
805 	case ixgbe_mac_x550em_a:
806 		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
807 		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
808 		for (i = 0; i < 8; i++)
809 			regs_buff[833 + i] =
810 				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
811 		for (i = 0; i < 8; i++)
812 			regs_buff[841 + i] =
813 				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
814 		for (i = 0; i < 8; i++)
815 			regs_buff[849 + i] =
816 				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
817 		for (i = 0; i < 8; i++)
818 			regs_buff[857 + i] =
819 				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
820 		break;
821 	default:
822 		break;
823 	}
824 
825 	for (i = 0; i < 8; i++)
826 		regs_buff[865 + i] =
827 		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
828 	for (i = 0; i < 8; i++)
829 		regs_buff[873 + i] =
830 		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
831 
832 	/* Statistics */
833 	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
834 	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
835 	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
836 	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
837 	for (i = 0; i < 8; i++)
838 		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
839 	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
840 	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
841 	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
842 	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
843 	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
844 	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
845 	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
846 	for (i = 0; i < 8; i++)
847 		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
848 	for (i = 0; i < 8; i++)
849 		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
850 	for (i = 0; i < 8; i++)
851 		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
852 	for (i = 0; i < 8; i++)
853 		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
854 	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
855 	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
856 	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
857 	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
858 	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
859 	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
860 	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
861 	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
862 	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
863 	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
864 	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
865 	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
866 	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
867 	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
868 	for (i = 0; i < 8; i++)
869 		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
870 	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
871 	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
872 	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
873 	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
874 	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
875 	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
876 	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
877 	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
878 	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
879 	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
880 	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
881 	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
882 	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
883 	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
884 	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
885 	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
886 	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
887 	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
888 	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
889 	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
890 	for (i = 0; i < 16; i++)
891 		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
892 	for (i = 0; i < 16; i++)
893 		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
894 	for (i = 0; i < 16; i++)
895 		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
896 	for (i = 0; i < 16; i++)
897 		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
898 
899 	/* MAC */
900 	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
901 	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
902 	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
903 	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
904 	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
905 	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
906 	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
907 	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
908 	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
909 	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
910 	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
911 	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
912 	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
913 	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
914 	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
915 	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
916 	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
917 	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
918 	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
919 	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
920 	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
921 	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
922 	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
923 	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
924 	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
925 	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
926 	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
927 	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
928 	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
929 	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
930 	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
931 	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
932 	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
933 
934 	/* Diagnostic */
935 	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
936 	for (i = 0; i < 8; i++)
937 		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
938 	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
939 	for (i = 0; i < 4; i++)
940 		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
941 	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
942 	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
943 	for (i = 0; i < 8; i++)
944 		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
945 	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
946 	for (i = 0; i < 4; i++)
947 		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
948 	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
949 	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
950 	for (i = 0; i < 4; i++)
951 		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
952 	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
953 	for (i = 0; i < 4; i++)
954 		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
955 	for (i = 0; i < 8; i++)
956 		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
957 	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
958 	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
959 	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
960 	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
961 	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
962 	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
963 	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
964 	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
965 	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
966 
967 	/* 82599 X540 specific registers  */
968 	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
969 
970 	/* 82599 X540 specific DCB registers  */
971 	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
972 	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
973 	for (i = 0; i < 4; i++)
974 		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
975 	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
976 					/* same as RTTQCNRM */
977 	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
978 					/* same as RTTQCNRR */
979 
980 	/* X540 specific DCB registers  */
981 	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
982 	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
983 
984 	/* Security config registers */
985 	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
986 	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
987 	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
988 	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
989 	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
990 	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
991 }
992 
993 static int ixgbe_get_eeprom_len(struct net_device *netdev)
994 {
995 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
996 	return adapter->hw.eeprom.word_size * 2;
997 }
998 
999 static int ixgbe_get_eeprom(struct net_device *netdev,
1000 			    struct ethtool_eeprom *eeprom, u8 *bytes)
1001 {
1002 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1003 	struct ixgbe_hw *hw = &adapter->hw;
1004 	u16 *eeprom_buff;
1005 	int first_word, last_word, eeprom_len;
1006 	int ret_val = 0;
1007 	u16 i;
1008 
1009 	if (eeprom->len == 0)
1010 		return -EINVAL;
1011 
1012 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1013 
1014 	first_word = eeprom->offset >> 1;
1015 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1016 	eeprom_len = last_word - first_word + 1;
1017 
1018 	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
1019 	if (!eeprom_buff)
1020 		return -ENOMEM;
1021 
1022 	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
1023 					     eeprom_buff);
1024 
1025 	/* Device's eeprom is always little-endian, word addressable */
1026 	for (i = 0; i < eeprom_len; i++)
1027 		le16_to_cpus(&eeprom_buff[i]);
1028 
1029 	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
1030 	kfree(eeprom_buff);
1031 
1032 	return ret_val;
1033 }
1034 
1035 static int ixgbe_set_eeprom(struct net_device *netdev,
1036 			    struct ethtool_eeprom *eeprom, u8 *bytes)
1037 {
1038 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1039 	struct ixgbe_hw *hw = &adapter->hw;
1040 	u16 *eeprom_buff;
1041 	void *ptr;
1042 	int max_len, first_word, last_word, ret_val = 0;
1043 	u16 i;
1044 
1045 	if (eeprom->len == 0)
1046 		return -EINVAL;
1047 
1048 	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
1049 		return -EINVAL;
1050 
1051 	max_len = hw->eeprom.word_size * 2;
1052 
1053 	first_word = eeprom->offset >> 1;
1054 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1055 	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1056 	if (!eeprom_buff)
1057 		return -ENOMEM;
1058 
1059 	ptr = eeprom_buff;
1060 
1061 	if (eeprom->offset & 1) {
1062 		/*
1063 		 * need read/modify/write of first changed EEPROM word
1064 		 * only the second byte of the word is being modified
1065 		 */
1066 		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
1067 		if (ret_val)
1068 			goto err;
1069 
1070 		ptr++;
1071 	}
1072 	if ((eeprom->offset + eeprom->len) & 1) {
1073 		/*
1074 		 * need read/modify/write of last changed EEPROM word
1075 		 * only the first byte of the word is being modified
1076 		 */
1077 		ret_val = hw->eeprom.ops.read(hw, last_word,
1078 					  &eeprom_buff[last_word - first_word]);
1079 		if (ret_val)
1080 			goto err;
1081 	}
1082 
1083 	/* Device's eeprom is always little-endian, word addressable */
1084 	for (i = 0; i < last_word - first_word + 1; i++)
1085 		le16_to_cpus(&eeprom_buff[i]);
1086 
1087 	memcpy(ptr, bytes, eeprom->len);
1088 
1089 	for (i = 0; i < last_word - first_word + 1; i++)
1090 		cpu_to_le16s(&eeprom_buff[i]);
1091 
1092 	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
1093 					      last_word - first_word + 1,
1094 					      eeprom_buff);
1095 
1096 	/* Update the checksum */
1097 	if (ret_val == 0)
1098 		hw->eeprom.ops.update_checksum(hw);
1099 
1100 err:
1101 	kfree(eeprom_buff);
1102 	return ret_val;
1103 }
1104 
1105 static void ixgbe_get_drvinfo(struct net_device *netdev,
1106 			      struct ethtool_drvinfo *drvinfo)
1107 {
1108 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1109 
1110 	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1111 
1112 	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
1113 		sizeof(drvinfo->fw_version));
1114 
1115 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
1116 		sizeof(drvinfo->bus_info));
1117 
1118 	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1119 }
1120 
1121 static void ixgbe_get_ringparam(struct net_device *netdev,
1122 				struct ethtool_ringparam *ring)
1123 {
1124 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1125 	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1126 	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1127 
1128 	ring->rx_max_pending = IXGBE_MAX_RXD;
1129 	ring->tx_max_pending = IXGBE_MAX_TXD;
1130 	ring->rx_pending = rx_ring->count;
1131 	ring->tx_pending = tx_ring->count;
1132 }
1133 
1134 static int ixgbe_set_ringparam(struct net_device *netdev,
1135 			       struct ethtool_ringparam *ring)
1136 {
1137 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1138 	struct ixgbe_ring *temp_ring;
1139 	int i, j, err = 0;
1140 	u32 new_rx_count, new_tx_count;
1141 
1142 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1143 		return -EINVAL;
1144 
1145 	new_tx_count = clamp_t(u32, ring->tx_pending,
1146 			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
1147 	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
1148 
1149 	new_rx_count = clamp_t(u32, ring->rx_pending,
1150 			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
1151 	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
1152 
1153 	if ((new_tx_count == adapter->tx_ring_count) &&
1154 	    (new_rx_count == adapter->rx_ring_count)) {
1155 		/* nothing to do */
1156 		return 0;
1157 	}
1158 
1159 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1160 		usleep_range(1000, 2000);
1161 
1162 	if (!netif_running(adapter->netdev)) {
1163 		for (i = 0; i < adapter->num_tx_queues; i++)
1164 			adapter->tx_ring[i]->count = new_tx_count;
1165 		for (i = 0; i < adapter->num_xdp_queues; i++)
1166 			adapter->xdp_ring[i]->count = new_tx_count;
1167 		for (i = 0; i < adapter->num_rx_queues; i++)
1168 			adapter->rx_ring[i]->count = new_rx_count;
1169 		adapter->tx_ring_count = new_tx_count;
1170 		adapter->xdp_ring_count = new_tx_count;
1171 		adapter->rx_ring_count = new_rx_count;
1172 		goto clear_reset;
1173 	}
1174 
1175 	/* allocate temporary buffer to store rings in */
1176 	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
1177 		  adapter->num_rx_queues);
1178 	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
1179 
1180 	if (!temp_ring) {
1181 		err = -ENOMEM;
1182 		goto clear_reset;
1183 	}
1184 
1185 	ixgbe_down(adapter);
1186 
1187 	/*
1188 	 * Setup new Tx resources and free the old Tx resources in that order.
1189 	 * We can then assign the new resources to the rings via a memcpy.
1190 	 * The advantage to this approach is that we are guaranteed to still
1191 	 * have resources even in the case of an allocation failure.
1192 	 */
1193 	if (new_tx_count != adapter->tx_ring_count) {
1194 		for (i = 0; i < adapter->num_tx_queues; i++) {
1195 			memcpy(&temp_ring[i], adapter->tx_ring[i],
1196 			       sizeof(struct ixgbe_ring));
1197 
1198 			temp_ring[i].count = new_tx_count;
1199 			err = ixgbe_setup_tx_resources(&temp_ring[i]);
1200 			if (err) {
1201 				while (i) {
1202 					i--;
1203 					ixgbe_free_tx_resources(&temp_ring[i]);
1204 				}
1205 				goto err_setup;
1206 			}
1207 		}
1208 
1209 		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1210 			memcpy(&temp_ring[i], adapter->xdp_ring[j],
1211 			       sizeof(struct ixgbe_ring));
1212 
1213 			temp_ring[i].count = new_tx_count;
1214 			err = ixgbe_setup_tx_resources(&temp_ring[i]);
1215 			if (err) {
1216 				while (i) {
1217 					i--;
1218 					ixgbe_free_tx_resources(&temp_ring[i]);
1219 				}
1220 				goto err_setup;
1221 			}
1222 		}
1223 
1224 		for (i = 0; i < adapter->num_tx_queues; i++) {
1225 			ixgbe_free_tx_resources(adapter->tx_ring[i]);
1226 
1227 			memcpy(adapter->tx_ring[i], &temp_ring[i],
1228 			       sizeof(struct ixgbe_ring));
1229 		}
1230 		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1231 			ixgbe_free_tx_resources(adapter->xdp_ring[j]);
1232 
1233 			memcpy(adapter->xdp_ring[j], &temp_ring[i],
1234 			       sizeof(struct ixgbe_ring));
1235 		}
1236 
1237 		adapter->tx_ring_count = new_tx_count;
1238 	}
1239 
1240 	/* Repeat the process for the Rx rings if needed */
1241 	if (new_rx_count != adapter->rx_ring_count) {
1242 		for (i = 0; i < adapter->num_rx_queues; i++) {
1243 			memcpy(&temp_ring[i], adapter->rx_ring[i],
1244 			       sizeof(struct ixgbe_ring));
1245 
1246 			/* Clear copied XDP RX-queue info */
1247 			memset(&temp_ring[i].xdp_rxq, 0,
1248 			       sizeof(temp_ring[i].xdp_rxq));
1249 
1250 			temp_ring[i].count = new_rx_count;
1251 			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1252 			if (err) {
1253 				while (i) {
1254 					i--;
1255 					ixgbe_free_rx_resources(&temp_ring[i]);
1256 				}
1257 				goto err_setup;
1258 			}
1259 
1260 		}
1261 
1262 		for (i = 0; i < adapter->num_rx_queues; i++) {
1263 			ixgbe_free_rx_resources(adapter->rx_ring[i]);
1264 
1265 			memcpy(adapter->rx_ring[i], &temp_ring[i],
1266 			       sizeof(struct ixgbe_ring));
1267 		}
1268 
1269 		adapter->rx_ring_count = new_rx_count;
1270 	}
1271 
1272 err_setup:
1273 	ixgbe_up(adapter);
1274 	vfree(temp_ring);
1275 clear_reset:
1276 	clear_bit(__IXGBE_RESETTING, &adapter->state);
1277 	return err;
1278 }
1279 
1280 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1281 {
1282 	switch (sset) {
1283 	case ETH_SS_TEST:
1284 		return IXGBE_TEST_LEN;
1285 	case ETH_SS_STATS:
1286 		return IXGBE_STATS_LEN;
1287 	case ETH_SS_PRIV_FLAGS:
1288 		return IXGBE_PRIV_FLAGS_STR_LEN;
1289 	default:
1290 		return -EOPNOTSUPP;
1291 	}
1292 }
1293 
1294 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1295 				    struct ethtool_stats *stats, u64 *data)
1296 {
1297 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1298 	struct rtnl_link_stats64 temp;
1299 	const struct rtnl_link_stats64 *net_stats;
1300 	unsigned int start;
1301 	struct ixgbe_ring *ring;
1302 	int i, j;
1303 	char *p = NULL;
1304 
1305 	ixgbe_update_stats(adapter);
1306 	net_stats = dev_get_stats(netdev, &temp);
1307 	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1308 		switch (ixgbe_gstrings_stats[i].type) {
1309 		case NETDEV_STATS:
1310 			p = (char *) net_stats +
1311 					ixgbe_gstrings_stats[i].stat_offset;
1312 			break;
1313 		case IXGBE_STATS:
1314 			p = (char *) adapter +
1315 					ixgbe_gstrings_stats[i].stat_offset;
1316 			break;
1317 		default:
1318 			data[i] = 0;
1319 			continue;
1320 		}
1321 
1322 		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1323 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1324 	}
1325 	for (j = 0; j < netdev->num_tx_queues; j++) {
1326 		ring = adapter->tx_ring[j];
1327 		if (!ring) {
1328 			data[i] = 0;
1329 			data[i+1] = 0;
1330 			i += 2;
1331 			continue;
1332 		}
1333 
1334 		do {
1335 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1336 			data[i]   = ring->stats.packets;
1337 			data[i+1] = ring->stats.bytes;
1338 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1339 		i += 2;
1340 	}
1341 	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1342 		ring = adapter->rx_ring[j];
1343 		if (!ring) {
1344 			data[i] = 0;
1345 			data[i+1] = 0;
1346 			i += 2;
1347 			continue;
1348 		}
1349 
1350 		do {
1351 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1352 			data[i]   = ring->stats.packets;
1353 			data[i+1] = ring->stats.bytes;
1354 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1355 		i += 2;
1356 	}
1357 
1358 	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1359 		data[i++] = adapter->stats.pxontxc[j];
1360 		data[i++] = adapter->stats.pxofftxc[j];
1361 	}
1362 	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1363 		data[i++] = adapter->stats.pxonrxc[j];
1364 		data[i++] = adapter->stats.pxoffrxc[j];
1365 	}
1366 }
1367 
1368 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1369 			      u8 *data)
1370 {
1371 	char *p = (char *)data;
1372 	unsigned int i;
1373 
1374 	switch (stringset) {
1375 	case ETH_SS_TEST:
1376 		for (i = 0; i < IXGBE_TEST_LEN; i++) {
1377 			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1378 			data += ETH_GSTRING_LEN;
1379 		}
1380 		break;
1381 	case ETH_SS_STATS:
1382 		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1383 			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1384 			       ETH_GSTRING_LEN);
1385 			p += ETH_GSTRING_LEN;
1386 		}
1387 		for (i = 0; i < netdev->num_tx_queues; i++) {
1388 			sprintf(p, "tx_queue_%u_packets", i);
1389 			p += ETH_GSTRING_LEN;
1390 			sprintf(p, "tx_queue_%u_bytes", i);
1391 			p += ETH_GSTRING_LEN;
1392 		}
1393 		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1394 			sprintf(p, "rx_queue_%u_packets", i);
1395 			p += ETH_GSTRING_LEN;
1396 			sprintf(p, "rx_queue_%u_bytes", i);
1397 			p += ETH_GSTRING_LEN;
1398 		}
1399 		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1400 			sprintf(p, "tx_pb_%u_pxon", i);
1401 			p += ETH_GSTRING_LEN;
1402 			sprintf(p, "tx_pb_%u_pxoff", i);
1403 			p += ETH_GSTRING_LEN;
1404 		}
1405 		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1406 			sprintf(p, "rx_pb_%u_pxon", i);
1407 			p += ETH_GSTRING_LEN;
1408 			sprintf(p, "rx_pb_%u_pxoff", i);
1409 			p += ETH_GSTRING_LEN;
1410 		}
1411 		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1412 		break;
1413 	case ETH_SS_PRIV_FLAGS:
1414 		memcpy(data, ixgbe_priv_flags_strings,
1415 		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1416 	}
1417 }
1418 
1419 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1420 {
1421 	struct ixgbe_hw *hw = &adapter->hw;
1422 	bool link_up;
1423 	u32 link_speed = 0;
1424 
1425 	if (ixgbe_removed(hw->hw_addr)) {
1426 		*data = 1;
1427 		return 1;
1428 	}
1429 	*data = 0;
1430 
1431 	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1432 	if (link_up)
1433 		return *data;
1434 	else
1435 		*data = 1;
1436 	return *data;
1437 }
1438 
1439 /* ethtool register test data */
1440 struct ixgbe_reg_test {
1441 	u16 reg;
1442 	u8  array_len;
1443 	u8  test_type;
1444 	u32 mask;
1445 	u32 write;
1446 };
1447 
1448 /* In the hardware, registers are laid out either singly, in arrays
1449  * spaced 0x40 bytes apart, or in contiguous tables.  We assume
1450  * most tests take place on arrays or single registers (handled
1451  * as a single-element array) and special-case the tables.
1452  * Table tests are always pattern tests.
1453  *
1454  * We also make provision for some required setup steps by specifying
1455  * registers to be written without any read-back testing.
1456  */
1457 
1458 #define PATTERN_TEST	1
1459 #define SET_READ_TEST	2
1460 #define WRITE_NO_TEST	3
1461 #define TABLE32_TEST	4
1462 #define TABLE64_TEST_LO	5
1463 #define TABLE64_TEST_HI	6
1464 
1465 /* default 82599 register test */
1466 static const struct ixgbe_reg_test reg_test_82599[] = {
1467 	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1468 	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1469 	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1470 	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1471 	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1472 	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1473 	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1474 	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1475 	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1476 	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1477 	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1478 	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1479 	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1480 	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1481 	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1482 	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1483 	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1484 	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1485 	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1486 	{ .reg = 0 }
1487 };
1488 
1489 /* default 82598 register test */
1490 static const struct ixgbe_reg_test reg_test_82598[] = {
1491 	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1492 	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1493 	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1494 	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1495 	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1496 	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1497 	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1498 	/* Enable all four RX queues before testing. */
1499 	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1500 	/* RDH is read-only for 82598, only test RDT. */
1501 	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1502 	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1503 	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1504 	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1505 	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1506 	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1507 	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1508 	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1509 	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1510 	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1511 	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1512 	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1513 	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1514 	{ .reg = 0 }
1515 };
1516 
1517 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1518 			     u32 mask, u32 write)
1519 {
1520 	u32 pat, val, before;
1521 	static const u32 test_pattern[] = {
1522 		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1523 
1524 	if (ixgbe_removed(adapter->hw.hw_addr)) {
1525 		*data = 1;
1526 		return true;
1527 	}
1528 	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1529 		before = ixgbe_read_reg(&adapter->hw, reg);
1530 		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1531 		val = ixgbe_read_reg(&adapter->hw, reg);
1532 		if (val != (test_pattern[pat] & write & mask)) {
1533 			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1534 			      reg, val, (test_pattern[pat] & write & mask));
1535 			*data = reg;
1536 			ixgbe_write_reg(&adapter->hw, reg, before);
1537 			return true;
1538 		}
1539 		ixgbe_write_reg(&adapter->hw, reg, before);
1540 	}
1541 	return false;
1542 }
1543 
1544 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1545 			      u32 mask, u32 write)
1546 {
1547 	u32 val, before;
1548 
1549 	if (ixgbe_removed(adapter->hw.hw_addr)) {
1550 		*data = 1;
1551 		return true;
1552 	}
1553 	before = ixgbe_read_reg(&adapter->hw, reg);
1554 	ixgbe_write_reg(&adapter->hw, reg, write & mask);
1555 	val = ixgbe_read_reg(&adapter->hw, reg);
1556 	if ((write & mask) != (val & mask)) {
1557 		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1558 		      reg, (val & mask), (write & mask));
1559 		*data = reg;
1560 		ixgbe_write_reg(&adapter->hw, reg, before);
1561 		return true;
1562 	}
1563 	ixgbe_write_reg(&adapter->hw, reg, before);
1564 	return false;
1565 }
1566 
1567 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1568 {
1569 	const struct ixgbe_reg_test *test;
1570 	u32 value, before, after;
1571 	u32 i, toggle;
1572 
1573 	if (ixgbe_removed(adapter->hw.hw_addr)) {
1574 		e_err(drv, "Adapter removed - register test blocked\n");
1575 		*data = 1;
1576 		return 1;
1577 	}
1578 	switch (adapter->hw.mac.type) {
1579 	case ixgbe_mac_82598EB:
1580 		toggle = 0x7FFFF3FF;
1581 		test = reg_test_82598;
1582 		break;
1583 	case ixgbe_mac_82599EB:
1584 	case ixgbe_mac_X540:
1585 	case ixgbe_mac_X550:
1586 	case ixgbe_mac_X550EM_x:
1587 	case ixgbe_mac_x550em_a:
1588 		toggle = 0x7FFFF30F;
1589 		test = reg_test_82599;
1590 		break;
1591 	default:
1592 		*data = 1;
1593 		return 1;
1594 	}
1595 
1596 	/*
1597 	 * Because the status register is such a special case,
1598 	 * we handle it separately from the rest of the register
1599 	 * tests.  Some bits are read-only, some toggle, and some
1600 	 * are writeable on newer MACs.
1601 	 */
1602 	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1603 	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1604 	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1605 	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1606 	if (value != after) {
1607 		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1608 		      after, value);
1609 		*data = 1;
1610 		return 1;
1611 	}
1612 	/* restore previous status */
1613 	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1614 
1615 	/*
1616 	 * Perform the remainder of the register test, looping through
1617 	 * the test table until we either fail or reach the null entry.
1618 	 */
1619 	while (test->reg) {
1620 		for (i = 0; i < test->array_len; i++) {
1621 			bool b = false;
1622 
1623 			switch (test->test_type) {
1624 			case PATTERN_TEST:
1625 				b = reg_pattern_test(adapter, data,
1626 						     test->reg + (i * 0x40),
1627 						     test->mask,
1628 						     test->write);
1629 				break;
1630 			case SET_READ_TEST:
1631 				b = reg_set_and_check(adapter, data,
1632 						      test->reg + (i * 0x40),
1633 						      test->mask,
1634 						      test->write);
1635 				break;
1636 			case WRITE_NO_TEST:
1637 				ixgbe_write_reg(&adapter->hw,
1638 						test->reg + (i * 0x40),
1639 						test->write);
1640 				break;
1641 			case TABLE32_TEST:
1642 				b = reg_pattern_test(adapter, data,
1643 						     test->reg + (i * 4),
1644 						     test->mask,
1645 						     test->write);
1646 				break;
1647 			case TABLE64_TEST_LO:
1648 				b = reg_pattern_test(adapter, data,
1649 						     test->reg + (i * 8),
1650 						     test->mask,
1651 						     test->write);
1652 				break;
1653 			case TABLE64_TEST_HI:
1654 				b = reg_pattern_test(adapter, data,
1655 						     (test->reg + 4) + (i * 8),
1656 						     test->mask,
1657 						     test->write);
1658 				break;
1659 			}
1660 			if (b)
1661 				return 1;
1662 		}
1663 		test++;
1664 	}
1665 
1666 	*data = 0;
1667 	return 0;
1668 }
1669 
1670 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1671 {
1672 	struct ixgbe_hw *hw = &adapter->hw;
1673 	if (hw->eeprom.ops.validate_checksum(hw, NULL))
1674 		*data = 1;
1675 	else
1676 		*data = 0;
1677 	return *data;
1678 }
1679 
1680 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1681 {
1682 	struct net_device *netdev = (struct net_device *) data;
1683 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1684 
1685 	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1686 
1687 	return IRQ_HANDLED;
1688 }
1689 
1690 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1691 {
1692 	struct net_device *netdev = adapter->netdev;
1693 	u32 mask, i = 0, shared_int = true;
1694 	u32 irq = adapter->pdev->irq;
1695 
1696 	*data = 0;
1697 
1698 	/* Hook up test interrupt handler just for this test */
1699 	if (adapter->msix_entries) {
1700 		/* NOTE: we don't test MSI-X interrupts here, yet */
1701 		return 0;
1702 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1703 		shared_int = false;
1704 		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1705 				netdev)) {
1706 			*data = 1;
1707 			return -1;
1708 		}
1709 	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1710 				netdev->name, netdev)) {
1711 		shared_int = false;
1712 	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1713 			       netdev->name, netdev)) {
1714 		*data = 1;
1715 		return -1;
1716 	}
1717 	e_info(hw, "testing %s interrupt\n", shared_int ?
1718 	       "shared" : "unshared");
1719 
1720 	/* Disable all the interrupts */
1721 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1722 	IXGBE_WRITE_FLUSH(&adapter->hw);
1723 	usleep_range(10000, 20000);
1724 
1725 	/* Test each interrupt */
1726 	for (; i < 10; i++) {
1727 		/* Interrupt to test */
1728 		mask = BIT(i);
1729 
1730 		if (!shared_int) {
1731 			/*
1732 			 * Disable the interrupts to be reported in
1733 			 * the cause register and then force the same
1734 			 * interrupt and see if one gets posted.  If
1735 			 * an interrupt was posted to the bus, the
1736 			 * test failed.
1737 			 */
1738 			adapter->test_icr = 0;
1739 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1740 					~mask & 0x00007FFF);
1741 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1742 					~mask & 0x00007FFF);
1743 			IXGBE_WRITE_FLUSH(&adapter->hw);
1744 			usleep_range(10000, 20000);
1745 
1746 			if (adapter->test_icr & mask) {
1747 				*data = 3;
1748 				break;
1749 			}
1750 		}
1751 
1752 		/*
1753 		 * Enable the interrupt to be reported in the cause
1754 		 * register and then force the same interrupt and see
1755 		 * if one gets posted.  If an interrupt was not posted
1756 		 * to the bus, the test failed.
1757 		 */
1758 		adapter->test_icr = 0;
1759 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1760 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1761 		IXGBE_WRITE_FLUSH(&adapter->hw);
1762 		usleep_range(10000, 20000);
1763 
1764 		if (!(adapter->test_icr & mask)) {
1765 			*data = 4;
1766 			break;
1767 		}
1768 
1769 		if (!shared_int) {
1770 			/*
1771 			 * Disable the other interrupts to be reported in
1772 			 * the cause register and then force the other
1773 			 * interrupts and see if any get posted.  If
1774 			 * an interrupt was posted to the bus, the
1775 			 * test failed.
1776 			 */
1777 			adapter->test_icr = 0;
1778 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1779 					~mask & 0x00007FFF);
1780 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1781 					~mask & 0x00007FFF);
1782 			IXGBE_WRITE_FLUSH(&adapter->hw);
1783 			usleep_range(10000, 20000);
1784 
1785 			if (adapter->test_icr) {
1786 				*data = 5;
1787 				break;
1788 			}
1789 		}
1790 	}
1791 
1792 	/* Disable all the interrupts */
1793 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1794 	IXGBE_WRITE_FLUSH(&adapter->hw);
1795 	usleep_range(10000, 20000);
1796 
1797 	/* Unhook test interrupt handler */
1798 	free_irq(irq, netdev);
1799 
1800 	return *data;
1801 }
1802 
1803 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1804 {
1805 	/* Shut down the DMA engines now so they can be reinitialized later,
1806 	 * since the test rings and normally used rings should overlap on
1807 	 * queue 0 we can just use the standard disable Rx/Tx calls and they
1808 	 * will take care of disabling the test rings for us.
1809 	 */
1810 
1811 	/* first Rx */
1812 	ixgbe_disable_rx(adapter);
1813 
1814 	/* now Tx */
1815 	ixgbe_disable_tx(adapter);
1816 
1817 	ixgbe_reset(adapter);
1818 
1819 	ixgbe_free_tx_resources(&adapter->test_tx_ring);
1820 	ixgbe_free_rx_resources(&adapter->test_rx_ring);
1821 }
1822 
1823 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1824 {
1825 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1826 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1827 	struct ixgbe_hw *hw = &adapter->hw;
1828 	u32 rctl, reg_data;
1829 	int ret_val;
1830 	int err;
1831 
1832 	/* Setup Tx descriptor ring and Tx buffers */
1833 	tx_ring->count = IXGBE_DEFAULT_TXD;
1834 	tx_ring->queue_index = 0;
1835 	tx_ring->dev = &adapter->pdev->dev;
1836 	tx_ring->netdev = adapter->netdev;
1837 	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1838 
1839 	err = ixgbe_setup_tx_resources(tx_ring);
1840 	if (err)
1841 		return 1;
1842 
1843 	switch (adapter->hw.mac.type) {
1844 	case ixgbe_mac_82599EB:
1845 	case ixgbe_mac_X540:
1846 	case ixgbe_mac_X550:
1847 	case ixgbe_mac_X550EM_x:
1848 	case ixgbe_mac_x550em_a:
1849 		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1850 		reg_data |= IXGBE_DMATXCTL_TE;
1851 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1852 		break;
1853 	default:
1854 		break;
1855 	}
1856 
1857 	ixgbe_configure_tx_ring(adapter, tx_ring);
1858 
1859 	/* Setup Rx Descriptor ring and Rx buffers */
1860 	rx_ring->count = IXGBE_DEFAULT_RXD;
1861 	rx_ring->queue_index = 0;
1862 	rx_ring->dev = &adapter->pdev->dev;
1863 	rx_ring->netdev = adapter->netdev;
1864 	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1865 
1866 	err = ixgbe_setup_rx_resources(adapter, rx_ring);
1867 	if (err) {
1868 		ret_val = 4;
1869 		goto err_nomem;
1870 	}
1871 
1872 	hw->mac.ops.disable_rx(hw);
1873 
1874 	ixgbe_configure_rx_ring(adapter, rx_ring);
1875 
1876 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1877 	rctl |= IXGBE_RXCTRL_DMBYPS;
1878 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1879 
1880 	hw->mac.ops.enable_rx(hw);
1881 
1882 	return 0;
1883 
1884 err_nomem:
1885 	ixgbe_free_desc_rings(adapter);
1886 	return ret_val;
1887 }
1888 
1889 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1890 {
1891 	struct ixgbe_hw *hw = &adapter->hw;
1892 	u32 reg_data;
1893 
1894 
1895 	/* Setup MAC loopback */
1896 	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1897 	reg_data |= IXGBE_HLREG0_LPBK;
1898 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1899 
1900 	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1901 	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1902 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1903 
1904 	/* X540 and X550 needs to set the MACC.FLU bit to force link up */
1905 	switch (adapter->hw.mac.type) {
1906 	case ixgbe_mac_X540:
1907 	case ixgbe_mac_X550:
1908 	case ixgbe_mac_X550EM_x:
1909 	case ixgbe_mac_x550em_a:
1910 		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1911 		reg_data |= IXGBE_MACC_FLU;
1912 		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1913 		break;
1914 	default:
1915 		if (hw->mac.orig_autoc) {
1916 			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1917 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1918 		} else {
1919 			return 10;
1920 		}
1921 	}
1922 	IXGBE_WRITE_FLUSH(hw);
1923 	usleep_range(10000, 20000);
1924 
1925 	/* Disable Atlas Tx lanes; re-enabled in reset path */
1926 	if (hw->mac.type == ixgbe_mac_82598EB) {
1927 		u8 atlas;
1928 
1929 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1930 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1931 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1932 
1933 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1934 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1935 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1936 
1937 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1938 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1939 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1940 
1941 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1942 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1943 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1944 	}
1945 
1946 	return 0;
1947 }
1948 
1949 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1950 {
1951 	u32 reg_data;
1952 
1953 	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1954 	reg_data &= ~IXGBE_HLREG0_LPBK;
1955 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1956 }
1957 
1958 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1959 				      unsigned int frame_size)
1960 {
1961 	memset(skb->data, 0xFF, frame_size);
1962 	frame_size >>= 1;
1963 	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1964 	skb->data[frame_size + 10] = 0xBE;
1965 	skb->data[frame_size + 12] = 0xAF;
1966 }
1967 
1968 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1969 				     unsigned int frame_size)
1970 {
1971 	unsigned char *data;
1972 	bool match = true;
1973 
1974 	frame_size >>= 1;
1975 
1976 	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1977 
1978 	if (data[3] != 0xFF ||
1979 	    data[frame_size + 10] != 0xBE ||
1980 	    data[frame_size + 12] != 0xAF)
1981 		match = false;
1982 
1983 	kunmap(rx_buffer->page);
1984 
1985 	return match;
1986 }
1987 
1988 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1989 				  struct ixgbe_ring *tx_ring,
1990 				  unsigned int size)
1991 {
1992 	union ixgbe_adv_rx_desc *rx_desc;
1993 	u16 rx_ntc, tx_ntc, count = 0;
1994 
1995 	/* initialize next to clean and descriptor values */
1996 	rx_ntc = rx_ring->next_to_clean;
1997 	tx_ntc = tx_ring->next_to_clean;
1998 	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1999 
2000 	while (tx_ntc != tx_ring->next_to_use) {
2001 		union ixgbe_adv_tx_desc *tx_desc;
2002 		struct ixgbe_tx_buffer *tx_buffer;
2003 
2004 		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
2005 
2006 		/* if DD is not set transmit has not completed */
2007 		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2008 			return count;
2009 
2010 		/* unmap buffer on Tx side */
2011 		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
2012 
2013 		/* Free all the Tx ring sk_buffs */
2014 		dev_kfree_skb_any(tx_buffer->skb);
2015 
2016 		/* unmap skb header data */
2017 		dma_unmap_single(tx_ring->dev,
2018 				 dma_unmap_addr(tx_buffer, dma),
2019 				 dma_unmap_len(tx_buffer, len),
2020 				 DMA_TO_DEVICE);
2021 		dma_unmap_len_set(tx_buffer, len, 0);
2022 
2023 		/* increment Tx next to clean counter */
2024 		tx_ntc++;
2025 		if (tx_ntc == tx_ring->count)
2026 			tx_ntc = 0;
2027 	}
2028 
2029 	while (rx_desc->wb.upper.length) {
2030 		struct ixgbe_rx_buffer *rx_buffer;
2031 
2032 		/* check Rx buffer */
2033 		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
2034 
2035 		/* sync Rx buffer for CPU read */
2036 		dma_sync_single_for_cpu(rx_ring->dev,
2037 					rx_buffer->dma,
2038 					ixgbe_rx_bufsz(rx_ring),
2039 					DMA_FROM_DEVICE);
2040 
2041 		/* verify contents of skb */
2042 		if (ixgbe_check_lbtest_frame(rx_buffer, size))
2043 			count++;
2044 		else
2045 			break;
2046 
2047 		/* sync Rx buffer for device write */
2048 		dma_sync_single_for_device(rx_ring->dev,
2049 					   rx_buffer->dma,
2050 					   ixgbe_rx_bufsz(rx_ring),
2051 					   DMA_FROM_DEVICE);
2052 
2053 		/* increment Rx next to clean counter */
2054 		rx_ntc++;
2055 		if (rx_ntc == rx_ring->count)
2056 			rx_ntc = 0;
2057 
2058 		/* fetch next descriptor */
2059 		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2060 	}
2061 
2062 	netdev_tx_reset_queue(txring_txq(tx_ring));
2063 
2064 	/* re-map buffers to ring, store next to clean values */
2065 	ixgbe_alloc_rx_buffers(rx_ring, count);
2066 	rx_ring->next_to_clean = rx_ntc;
2067 	tx_ring->next_to_clean = tx_ntc;
2068 
2069 	return count;
2070 }
2071 
2072 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
2073 {
2074 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
2075 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
2076 	int i, j, lc, good_cnt, ret_val = 0;
2077 	unsigned int size = 1024;
2078 	netdev_tx_t tx_ret_val;
2079 	struct sk_buff *skb;
2080 	u32 flags_orig = adapter->flags;
2081 
2082 	/* DCB can modify the frames on Tx */
2083 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2084 
2085 	/* allocate test skb */
2086 	skb = alloc_skb(size, GFP_KERNEL);
2087 	if (!skb)
2088 		return 11;
2089 
2090 	/* place data into test skb */
2091 	ixgbe_create_lbtest_frame(skb, size);
2092 	skb_put(skb, size);
2093 
2094 	/*
2095 	 * Calculate the loop count based on the largest descriptor ring
2096 	 * The idea is to wrap the largest ring a number of times using 64
2097 	 * send/receive pairs during each loop
2098 	 */
2099 
2100 	if (rx_ring->count <= tx_ring->count)
2101 		lc = ((tx_ring->count / 64) * 2) + 1;
2102 	else
2103 		lc = ((rx_ring->count / 64) * 2) + 1;
2104 
2105 	for (j = 0; j <= lc; j++) {
2106 		/* reset count of good packets */
2107 		good_cnt = 0;
2108 
2109 		/* place 64 packets on the transmit queue*/
2110 		for (i = 0; i < 64; i++) {
2111 			skb_get(skb);
2112 			tx_ret_val = ixgbe_xmit_frame_ring(skb,
2113 							   adapter,
2114 							   tx_ring);
2115 			if (tx_ret_val == NETDEV_TX_OK)
2116 				good_cnt++;
2117 		}
2118 
2119 		if (good_cnt != 64) {
2120 			ret_val = 12;
2121 			break;
2122 		}
2123 
2124 		/* allow 200 milliseconds for packets to go from Tx to Rx */
2125 		msleep(200);
2126 
2127 		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
2128 		if (good_cnt != 64) {
2129 			ret_val = 13;
2130 			break;
2131 		}
2132 	}
2133 
2134 	/* free the original skb */
2135 	kfree_skb(skb);
2136 	adapter->flags = flags_orig;
2137 
2138 	return ret_val;
2139 }
2140 
2141 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2142 {
2143 	*data = ixgbe_setup_desc_rings(adapter);
2144 	if (*data)
2145 		goto out;
2146 	*data = ixgbe_setup_loopback_test(adapter);
2147 	if (*data)
2148 		goto err_loopback;
2149 	*data = ixgbe_run_loopback_test(adapter);
2150 	ixgbe_loopback_cleanup(adapter);
2151 
2152 err_loopback:
2153 	ixgbe_free_desc_rings(adapter);
2154 out:
2155 	return *data;
2156 }
2157 
2158 static void ixgbe_diag_test(struct net_device *netdev,
2159 			    struct ethtool_test *eth_test, u64 *data)
2160 {
2161 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2162 	bool if_running = netif_running(netdev);
2163 
2164 	if (ixgbe_removed(adapter->hw.hw_addr)) {
2165 		e_err(hw, "Adapter removed - test blocked\n");
2166 		data[0] = 1;
2167 		data[1] = 1;
2168 		data[2] = 1;
2169 		data[3] = 1;
2170 		data[4] = 1;
2171 		eth_test->flags |= ETH_TEST_FL_FAILED;
2172 		return;
2173 	}
2174 	set_bit(__IXGBE_TESTING, &adapter->state);
2175 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2176 		struct ixgbe_hw *hw = &adapter->hw;
2177 
2178 		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2179 			int i;
2180 			for (i = 0; i < adapter->num_vfs; i++) {
2181 				if (adapter->vfinfo[i].clear_to_send) {
2182 					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2183 					data[0] = 1;
2184 					data[1] = 1;
2185 					data[2] = 1;
2186 					data[3] = 1;
2187 					data[4] = 1;
2188 					eth_test->flags |= ETH_TEST_FL_FAILED;
2189 					clear_bit(__IXGBE_TESTING,
2190 						  &adapter->state);
2191 					return;
2192 				}
2193 			}
2194 		}
2195 
2196 		/* Offline tests */
2197 		e_info(hw, "offline testing starting\n");
2198 
2199 		/* Link test performed before hardware reset so autoneg doesn't
2200 		 * interfere with test result
2201 		 */
2202 		if (ixgbe_link_test(adapter, &data[4]))
2203 			eth_test->flags |= ETH_TEST_FL_FAILED;
2204 
2205 		if (if_running)
2206 			/* indicate we're in test mode */
2207 			ixgbe_close(netdev);
2208 		else
2209 			ixgbe_reset(adapter);
2210 
2211 		e_info(hw, "register testing starting\n");
2212 		if (ixgbe_reg_test(adapter, &data[0]))
2213 			eth_test->flags |= ETH_TEST_FL_FAILED;
2214 
2215 		ixgbe_reset(adapter);
2216 		e_info(hw, "eeprom testing starting\n");
2217 		if (ixgbe_eeprom_test(adapter, &data[1]))
2218 			eth_test->flags |= ETH_TEST_FL_FAILED;
2219 
2220 		ixgbe_reset(adapter);
2221 		e_info(hw, "interrupt testing starting\n");
2222 		if (ixgbe_intr_test(adapter, &data[2]))
2223 			eth_test->flags |= ETH_TEST_FL_FAILED;
2224 
2225 		/* If SRIOV or VMDq is enabled then skip MAC
2226 		 * loopback diagnostic. */
2227 		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2228 				      IXGBE_FLAG_VMDQ_ENABLED)) {
2229 			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2230 			data[3] = 0;
2231 			goto skip_loopback;
2232 		}
2233 
2234 		ixgbe_reset(adapter);
2235 		e_info(hw, "loopback testing starting\n");
2236 		if (ixgbe_loopback_test(adapter, &data[3]))
2237 			eth_test->flags |= ETH_TEST_FL_FAILED;
2238 
2239 skip_loopback:
2240 		ixgbe_reset(adapter);
2241 
2242 		/* clear testing bit and return adapter to previous state */
2243 		clear_bit(__IXGBE_TESTING, &adapter->state);
2244 		if (if_running)
2245 			ixgbe_open(netdev);
2246 		else if (hw->mac.ops.disable_tx_laser)
2247 			hw->mac.ops.disable_tx_laser(hw);
2248 	} else {
2249 		e_info(hw, "online testing starting\n");
2250 
2251 		/* Online tests */
2252 		if (ixgbe_link_test(adapter, &data[4]))
2253 			eth_test->flags |= ETH_TEST_FL_FAILED;
2254 
2255 		/* Offline tests aren't run; pass by default */
2256 		data[0] = 0;
2257 		data[1] = 0;
2258 		data[2] = 0;
2259 		data[3] = 0;
2260 
2261 		clear_bit(__IXGBE_TESTING, &adapter->state);
2262 	}
2263 }
2264 
2265 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2266 			       struct ethtool_wolinfo *wol)
2267 {
2268 	struct ixgbe_hw *hw = &adapter->hw;
2269 	int retval = 0;
2270 
2271 	/* WOL not supported for all devices */
2272 	if (!ixgbe_wol_supported(adapter, hw->device_id,
2273 				 hw->subsystem_device_id)) {
2274 		retval = 1;
2275 		wol->supported = 0;
2276 	}
2277 
2278 	return retval;
2279 }
2280 
2281 static void ixgbe_get_wol(struct net_device *netdev,
2282 			  struct ethtool_wolinfo *wol)
2283 {
2284 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2285 
2286 	wol->supported = WAKE_UCAST | WAKE_MCAST |
2287 			 WAKE_BCAST | WAKE_MAGIC;
2288 	wol->wolopts = 0;
2289 
2290 	if (ixgbe_wol_exclusion(adapter, wol) ||
2291 	    !device_can_wakeup(&adapter->pdev->dev))
2292 		return;
2293 
2294 	if (adapter->wol & IXGBE_WUFC_EX)
2295 		wol->wolopts |= WAKE_UCAST;
2296 	if (adapter->wol & IXGBE_WUFC_MC)
2297 		wol->wolopts |= WAKE_MCAST;
2298 	if (adapter->wol & IXGBE_WUFC_BC)
2299 		wol->wolopts |= WAKE_BCAST;
2300 	if (adapter->wol & IXGBE_WUFC_MAG)
2301 		wol->wolopts |= WAKE_MAGIC;
2302 }
2303 
2304 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2305 {
2306 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2307 
2308 	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
2309 			    WAKE_FILTER))
2310 		return -EOPNOTSUPP;
2311 
2312 	if (ixgbe_wol_exclusion(adapter, wol))
2313 		return wol->wolopts ? -EOPNOTSUPP : 0;
2314 
2315 	adapter->wol = 0;
2316 
2317 	if (wol->wolopts & WAKE_UCAST)
2318 		adapter->wol |= IXGBE_WUFC_EX;
2319 	if (wol->wolopts & WAKE_MCAST)
2320 		adapter->wol |= IXGBE_WUFC_MC;
2321 	if (wol->wolopts & WAKE_BCAST)
2322 		adapter->wol |= IXGBE_WUFC_BC;
2323 	if (wol->wolopts & WAKE_MAGIC)
2324 		adapter->wol |= IXGBE_WUFC_MAG;
2325 
2326 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2327 
2328 	return 0;
2329 }
2330 
2331 static int ixgbe_nway_reset(struct net_device *netdev)
2332 {
2333 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2334 
2335 	if (netif_running(netdev))
2336 		ixgbe_reinit_locked(adapter);
2337 
2338 	return 0;
2339 }
2340 
2341 static int ixgbe_set_phys_id(struct net_device *netdev,
2342 			     enum ethtool_phys_id_state state)
2343 {
2344 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2345 	struct ixgbe_hw *hw = &adapter->hw;
2346 
2347 	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2348 		return -EOPNOTSUPP;
2349 
2350 	switch (state) {
2351 	case ETHTOOL_ID_ACTIVE:
2352 		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2353 		return 2;
2354 
2355 	case ETHTOOL_ID_ON:
2356 		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2357 		break;
2358 
2359 	case ETHTOOL_ID_OFF:
2360 		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2361 		break;
2362 
2363 	case ETHTOOL_ID_INACTIVE:
2364 		/* Restore LED settings */
2365 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2366 		break;
2367 	}
2368 
2369 	return 0;
2370 }
2371 
2372 static int ixgbe_get_coalesce(struct net_device *netdev,
2373 			      struct ethtool_coalesce *ec)
2374 {
2375 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2376 
2377 	/* only valid if in constant ITR mode */
2378 	if (adapter->rx_itr_setting <= 1)
2379 		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2380 	else
2381 		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2382 
2383 	/* if in mixed tx/rx queues per vector mode, report only rx settings */
2384 	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2385 		return 0;
2386 
2387 	/* only valid if in constant ITR mode */
2388 	if (adapter->tx_itr_setting <= 1)
2389 		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2390 	else
2391 		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2392 
2393 	return 0;
2394 }
2395 
2396 /*
2397  * this function must be called before setting the new value of
2398  * rx_itr_setting
2399  */
2400 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2401 {
2402 	struct net_device *netdev = adapter->netdev;
2403 
2404 	/* nothing to do if LRO or RSC are not enabled */
2405 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2406 	    !(netdev->features & NETIF_F_LRO))
2407 		return false;
2408 
2409 	/* check the feature flag value and enable RSC if necessary */
2410 	if (adapter->rx_itr_setting == 1 ||
2411 	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2412 		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2413 			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2414 			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2415 			return true;
2416 		}
2417 	/* if interrupt rate is too high then disable RSC */
2418 	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2419 		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2420 		e_info(probe, "rx-usecs set too low, disabling RSC\n");
2421 		return true;
2422 	}
2423 	return false;
2424 }
2425 
2426 static int ixgbe_set_coalesce(struct net_device *netdev,
2427 			      struct ethtool_coalesce *ec)
2428 {
2429 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2430 	struct ixgbe_q_vector *q_vector;
2431 	int i;
2432 	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2433 	bool need_reset = false;
2434 
2435 	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2436 		/* reject Tx specific changes in case of mixed RxTx vectors */
2437 		if (ec->tx_coalesce_usecs)
2438 			return -EINVAL;
2439 		tx_itr_prev = adapter->rx_itr_setting;
2440 	} else {
2441 		tx_itr_prev = adapter->tx_itr_setting;
2442 	}
2443 
2444 	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2445 	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2446 		return -EINVAL;
2447 
2448 	if (ec->rx_coalesce_usecs > 1)
2449 		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2450 	else
2451 		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2452 
2453 	if (adapter->rx_itr_setting == 1)
2454 		rx_itr_param = IXGBE_20K_ITR;
2455 	else
2456 		rx_itr_param = adapter->rx_itr_setting;
2457 
2458 	if (ec->tx_coalesce_usecs > 1)
2459 		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2460 	else
2461 		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2462 
2463 	if (adapter->tx_itr_setting == 1)
2464 		tx_itr_param = IXGBE_12K_ITR;
2465 	else
2466 		tx_itr_param = adapter->tx_itr_setting;
2467 
2468 	/* mixed Rx/Tx */
2469 	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2470 		adapter->tx_itr_setting = adapter->rx_itr_setting;
2471 
2472 	/* detect ITR changes that require update of TXDCTL.WTHRESH */
2473 	if ((adapter->tx_itr_setting != 1) &&
2474 	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2475 		if ((tx_itr_prev == 1) ||
2476 		    (tx_itr_prev >= IXGBE_100K_ITR))
2477 			need_reset = true;
2478 	} else {
2479 		if ((tx_itr_prev != 1) &&
2480 		    (tx_itr_prev < IXGBE_100K_ITR))
2481 			need_reset = true;
2482 	}
2483 
2484 	/* check the old value and enable RSC if necessary */
2485 	need_reset |= ixgbe_update_rsc(adapter);
2486 
2487 	for (i = 0; i < adapter->num_q_vectors; i++) {
2488 		q_vector = adapter->q_vector[i];
2489 		if (q_vector->tx.count && !q_vector->rx.count)
2490 			/* tx only */
2491 			q_vector->itr = tx_itr_param;
2492 		else
2493 			/* rx only or mixed */
2494 			q_vector->itr = rx_itr_param;
2495 		ixgbe_write_eitr(q_vector);
2496 	}
2497 
2498 	/*
2499 	 * do reset here at the end to make sure EITR==0 case is handled
2500 	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2501 	 * also locks in RSC enable/disable which requires reset
2502 	 */
2503 	if (need_reset)
2504 		ixgbe_do_reset(netdev);
2505 
2506 	return 0;
2507 }
2508 
2509 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2510 					struct ethtool_rxnfc *cmd)
2511 {
2512 	union ixgbe_atr_input *mask = &adapter->fdir_mask;
2513 	struct ethtool_rx_flow_spec *fsp =
2514 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2515 	struct hlist_node *node2;
2516 	struct ixgbe_fdir_filter *rule = NULL;
2517 
2518 	/* report total rule count */
2519 	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2520 
2521 	hlist_for_each_entry_safe(rule, node2,
2522 				  &adapter->fdir_filter_list, fdir_node) {
2523 		if (fsp->location <= rule->sw_idx)
2524 			break;
2525 	}
2526 
2527 	if (!rule || fsp->location != rule->sw_idx)
2528 		return -EINVAL;
2529 
2530 	/* fill out the flow spec entry */
2531 
2532 	/* set flow type field */
2533 	switch (rule->filter.formatted.flow_type) {
2534 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
2535 		fsp->flow_type = TCP_V4_FLOW;
2536 		break;
2537 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
2538 		fsp->flow_type = UDP_V4_FLOW;
2539 		break;
2540 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2541 		fsp->flow_type = SCTP_V4_FLOW;
2542 		break;
2543 	case IXGBE_ATR_FLOW_TYPE_IPV4:
2544 		fsp->flow_type = IP_USER_FLOW;
2545 		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2546 		fsp->h_u.usr_ip4_spec.proto = 0;
2547 		fsp->m_u.usr_ip4_spec.proto = 0;
2548 		break;
2549 	default:
2550 		return -EINVAL;
2551 	}
2552 
2553 	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2554 	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2555 	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2556 	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2557 	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2558 	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2559 	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2560 	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2561 	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2562 	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2563 	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2564 	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2565 	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2566 	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2567 	fsp->flow_type |= FLOW_EXT;
2568 
2569 	/* record action */
2570 	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2571 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
2572 	else
2573 		fsp->ring_cookie = rule->action;
2574 
2575 	return 0;
2576 }
2577 
2578 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2579 				      struct ethtool_rxnfc *cmd,
2580 				      u32 *rule_locs)
2581 {
2582 	struct hlist_node *node2;
2583 	struct ixgbe_fdir_filter *rule;
2584 	int cnt = 0;
2585 
2586 	/* report total rule count */
2587 	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2588 
2589 	hlist_for_each_entry_safe(rule, node2,
2590 				  &adapter->fdir_filter_list, fdir_node) {
2591 		if (cnt == cmd->rule_cnt)
2592 			return -EMSGSIZE;
2593 		rule_locs[cnt] = rule->sw_idx;
2594 		cnt++;
2595 	}
2596 
2597 	cmd->rule_cnt = cnt;
2598 
2599 	return 0;
2600 }
2601 
2602 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2603 				   struct ethtool_rxnfc *cmd)
2604 {
2605 	cmd->data = 0;
2606 
2607 	/* Report default options for RSS on ixgbe */
2608 	switch (cmd->flow_type) {
2609 	case TCP_V4_FLOW:
2610 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2611 		fallthrough;
2612 	case UDP_V4_FLOW:
2613 		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2614 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2615 		fallthrough;
2616 	case SCTP_V4_FLOW:
2617 	case AH_ESP_V4_FLOW:
2618 	case AH_V4_FLOW:
2619 	case ESP_V4_FLOW:
2620 	case IPV4_FLOW:
2621 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2622 		break;
2623 	case TCP_V6_FLOW:
2624 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2625 		fallthrough;
2626 	case UDP_V6_FLOW:
2627 		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2628 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2629 		fallthrough;
2630 	case SCTP_V6_FLOW:
2631 	case AH_ESP_V6_FLOW:
2632 	case AH_V6_FLOW:
2633 	case ESP_V6_FLOW:
2634 	case IPV6_FLOW:
2635 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2636 		break;
2637 	default:
2638 		return -EINVAL;
2639 	}
2640 
2641 	return 0;
2642 }
2643 
2644 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2645 			   u32 *rule_locs)
2646 {
2647 	struct ixgbe_adapter *adapter = netdev_priv(dev);
2648 	int ret = -EOPNOTSUPP;
2649 
2650 	switch (cmd->cmd) {
2651 	case ETHTOOL_GRXRINGS:
2652 		cmd->data = adapter->num_rx_queues;
2653 		ret = 0;
2654 		break;
2655 	case ETHTOOL_GRXCLSRLCNT:
2656 		cmd->rule_cnt = adapter->fdir_filter_count;
2657 		ret = 0;
2658 		break;
2659 	case ETHTOOL_GRXCLSRULE:
2660 		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2661 		break;
2662 	case ETHTOOL_GRXCLSRLALL:
2663 		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2664 		break;
2665 	case ETHTOOL_GRXFH:
2666 		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2667 		break;
2668 	default:
2669 		break;
2670 	}
2671 
2672 	return ret;
2673 }
2674 
2675 int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2676 				    struct ixgbe_fdir_filter *input,
2677 				    u16 sw_idx)
2678 {
2679 	struct ixgbe_hw *hw = &adapter->hw;
2680 	struct hlist_node *node2;
2681 	struct ixgbe_fdir_filter *rule, *parent;
2682 	int err = -EINVAL;
2683 
2684 	parent = NULL;
2685 	rule = NULL;
2686 
2687 	hlist_for_each_entry_safe(rule, node2,
2688 				  &adapter->fdir_filter_list, fdir_node) {
2689 		/* hash found, or no matching entry */
2690 		if (rule->sw_idx >= sw_idx)
2691 			break;
2692 		parent = rule;
2693 	}
2694 
2695 	/* if there is an old rule occupying our place remove it */
2696 	if (rule && (rule->sw_idx == sw_idx)) {
2697 		if (!input || (rule->filter.formatted.bkt_hash !=
2698 			       input->filter.formatted.bkt_hash)) {
2699 			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2700 								&rule->filter,
2701 								sw_idx);
2702 		}
2703 
2704 		hlist_del(&rule->fdir_node);
2705 		kfree(rule);
2706 		adapter->fdir_filter_count--;
2707 	}
2708 
2709 	/*
2710 	 * If no input this was a delete, err should be 0 if a rule was
2711 	 * successfully found and removed from the list else -EINVAL
2712 	 */
2713 	if (!input)
2714 		return err;
2715 
2716 	/* initialize node and set software index */
2717 	INIT_HLIST_NODE(&input->fdir_node);
2718 
2719 	/* add filter to the list */
2720 	if (parent)
2721 		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2722 	else
2723 		hlist_add_head(&input->fdir_node,
2724 			       &adapter->fdir_filter_list);
2725 
2726 	/* update counts */
2727 	adapter->fdir_filter_count++;
2728 
2729 	return 0;
2730 }
2731 
2732 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2733 				       u8 *flow_type)
2734 {
2735 	switch (fsp->flow_type & ~FLOW_EXT) {
2736 	case TCP_V4_FLOW:
2737 		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2738 		break;
2739 	case UDP_V4_FLOW:
2740 		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2741 		break;
2742 	case SCTP_V4_FLOW:
2743 		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2744 		break;
2745 	case IP_USER_FLOW:
2746 		switch (fsp->h_u.usr_ip4_spec.proto) {
2747 		case IPPROTO_TCP:
2748 			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2749 			break;
2750 		case IPPROTO_UDP:
2751 			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2752 			break;
2753 		case IPPROTO_SCTP:
2754 			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2755 			break;
2756 		case 0:
2757 			if (!fsp->m_u.usr_ip4_spec.proto) {
2758 				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2759 				break;
2760 			}
2761 			fallthrough;
2762 		default:
2763 			return 0;
2764 		}
2765 		break;
2766 	default:
2767 		return 0;
2768 	}
2769 
2770 	return 1;
2771 }
2772 
2773 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2774 					struct ethtool_rxnfc *cmd)
2775 {
2776 	struct ethtool_rx_flow_spec *fsp =
2777 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2778 	struct ixgbe_hw *hw = &adapter->hw;
2779 	struct ixgbe_fdir_filter *input;
2780 	union ixgbe_atr_input mask;
2781 	u8 queue;
2782 	int err;
2783 
2784 	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2785 		return -EOPNOTSUPP;
2786 
2787 	/* ring_cookie is a masked into a set of queues and ixgbe pools or
2788 	 * we use the drop index.
2789 	 */
2790 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2791 		queue = IXGBE_FDIR_DROP_QUEUE;
2792 	} else {
2793 		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2794 		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2795 
2796 		if (!vf && (ring >= adapter->num_rx_queues))
2797 			return -EINVAL;
2798 		else if (vf &&
2799 			 ((vf > adapter->num_vfs) ||
2800 			   ring >= adapter->num_rx_queues_per_pool))
2801 			return -EINVAL;
2802 
2803 		/* Map the ring onto the absolute queue index */
2804 		if (!vf)
2805 			queue = adapter->rx_ring[ring]->reg_idx;
2806 		else
2807 			queue = ((vf - 1) *
2808 				adapter->num_rx_queues_per_pool) + ring;
2809 	}
2810 
2811 	/* Don't allow indexes to exist outside of available space */
2812 	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2813 		e_err(drv, "Location out of range\n");
2814 		return -EINVAL;
2815 	}
2816 
2817 	input = kzalloc(sizeof(*input), GFP_ATOMIC);
2818 	if (!input)
2819 		return -ENOMEM;
2820 
2821 	memset(&mask, 0, sizeof(union ixgbe_atr_input));
2822 
2823 	/* set SW index */
2824 	input->sw_idx = fsp->location;
2825 
2826 	/* record flow type */
2827 	if (!ixgbe_flowspec_to_flow_type(fsp,
2828 					 &input->filter.formatted.flow_type)) {
2829 		e_err(drv, "Unrecognized flow type\n");
2830 		goto err_out;
2831 	}
2832 
2833 	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2834 				   IXGBE_ATR_L4TYPE_MASK;
2835 
2836 	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2837 		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2838 
2839 	/* Copy input into formatted structures */
2840 	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2841 	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2842 	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2843 	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2844 	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2845 	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2846 	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2847 	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2848 
2849 	if (fsp->flow_type & FLOW_EXT) {
2850 		input->filter.formatted.vm_pool =
2851 				(unsigned char)ntohl(fsp->h_ext.data[1]);
2852 		mask.formatted.vm_pool =
2853 				(unsigned char)ntohl(fsp->m_ext.data[1]);
2854 		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2855 		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2856 		input->filter.formatted.flex_bytes =
2857 						fsp->h_ext.vlan_etype;
2858 		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2859 	}
2860 
2861 	/* determine if we need to drop or route the packet */
2862 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2863 		input->action = IXGBE_FDIR_DROP_QUEUE;
2864 	else
2865 		input->action = fsp->ring_cookie;
2866 
2867 	spin_lock(&adapter->fdir_perfect_lock);
2868 
2869 	if (hlist_empty(&adapter->fdir_filter_list)) {
2870 		/* save mask and program input mask into HW */
2871 		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2872 		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2873 		if (err) {
2874 			e_err(drv, "Error writing mask\n");
2875 			goto err_out_w_lock;
2876 		}
2877 	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2878 		e_err(drv, "Only one mask supported per port\n");
2879 		goto err_out_w_lock;
2880 	}
2881 
2882 	/* apply mask and compute/store hash */
2883 	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2884 
2885 	/* program filters to filter memory */
2886 	err = ixgbe_fdir_write_perfect_filter_82599(hw,
2887 				&input->filter, input->sw_idx, queue);
2888 	if (err)
2889 		goto err_out_w_lock;
2890 
2891 	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2892 
2893 	spin_unlock(&adapter->fdir_perfect_lock);
2894 
2895 	return err;
2896 err_out_w_lock:
2897 	spin_unlock(&adapter->fdir_perfect_lock);
2898 err_out:
2899 	kfree(input);
2900 	return -EINVAL;
2901 }
2902 
2903 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2904 					struct ethtool_rxnfc *cmd)
2905 {
2906 	struct ethtool_rx_flow_spec *fsp =
2907 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2908 	int err;
2909 
2910 	spin_lock(&adapter->fdir_perfect_lock);
2911 	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2912 	spin_unlock(&adapter->fdir_perfect_lock);
2913 
2914 	return err;
2915 }
2916 
2917 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2918 		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2919 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2920 				  struct ethtool_rxnfc *nfc)
2921 {
2922 	u32 flags2 = adapter->flags2;
2923 
2924 	/*
2925 	 * RSS does not support anything other than hashing
2926 	 * to queues on src and dst IPs and ports
2927 	 */
2928 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2929 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
2930 		return -EINVAL;
2931 
2932 	switch (nfc->flow_type) {
2933 	case TCP_V4_FLOW:
2934 	case TCP_V6_FLOW:
2935 		if (!(nfc->data & RXH_IP_SRC) ||
2936 		    !(nfc->data & RXH_IP_DST) ||
2937 		    !(nfc->data & RXH_L4_B_0_1) ||
2938 		    !(nfc->data & RXH_L4_B_2_3))
2939 			return -EINVAL;
2940 		break;
2941 	case UDP_V4_FLOW:
2942 		if (!(nfc->data & RXH_IP_SRC) ||
2943 		    !(nfc->data & RXH_IP_DST))
2944 			return -EINVAL;
2945 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2946 		case 0:
2947 			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2948 			break;
2949 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2950 			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2951 			break;
2952 		default:
2953 			return -EINVAL;
2954 		}
2955 		break;
2956 	case UDP_V6_FLOW:
2957 		if (!(nfc->data & RXH_IP_SRC) ||
2958 		    !(nfc->data & RXH_IP_DST))
2959 			return -EINVAL;
2960 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2961 		case 0:
2962 			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2963 			break;
2964 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2965 			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2966 			break;
2967 		default:
2968 			return -EINVAL;
2969 		}
2970 		break;
2971 	case AH_ESP_V4_FLOW:
2972 	case AH_V4_FLOW:
2973 	case ESP_V4_FLOW:
2974 	case SCTP_V4_FLOW:
2975 	case AH_ESP_V6_FLOW:
2976 	case AH_V6_FLOW:
2977 	case ESP_V6_FLOW:
2978 	case SCTP_V6_FLOW:
2979 		if (!(nfc->data & RXH_IP_SRC) ||
2980 		    !(nfc->data & RXH_IP_DST) ||
2981 		    (nfc->data & RXH_L4_B_0_1) ||
2982 		    (nfc->data & RXH_L4_B_2_3))
2983 			return -EINVAL;
2984 		break;
2985 	default:
2986 		return -EINVAL;
2987 	}
2988 
2989 	/* if we changed something we need to update flags */
2990 	if (flags2 != adapter->flags2) {
2991 		struct ixgbe_hw *hw = &adapter->hw;
2992 		u32 mrqc;
2993 		unsigned int pf_pool = adapter->num_vfs;
2994 
2995 		if ((hw->mac.type >= ixgbe_mac_X550) &&
2996 		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2997 			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
2998 		else
2999 			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3000 
3001 		if ((flags2 & UDP_RSS_FLAGS) &&
3002 		    !(adapter->flags2 & UDP_RSS_FLAGS))
3003 			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
3004 
3005 		adapter->flags2 = flags2;
3006 
3007 		/* Perform hash on these packet types */
3008 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
3009 		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3010 		      | IXGBE_MRQC_RSS_FIELD_IPV6
3011 		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3012 
3013 		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
3014 			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
3015 
3016 		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3017 			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3018 
3019 		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3020 			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3021 
3022 		if ((hw->mac.type >= ixgbe_mac_X550) &&
3023 		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3024 			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
3025 		else
3026 			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3027 	}
3028 
3029 	return 0;
3030 }
3031 
3032 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3033 {
3034 	struct ixgbe_adapter *adapter = netdev_priv(dev);
3035 	int ret = -EOPNOTSUPP;
3036 
3037 	switch (cmd->cmd) {
3038 	case ETHTOOL_SRXCLSRLINS:
3039 		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
3040 		break;
3041 	case ETHTOOL_SRXCLSRLDEL:
3042 		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
3043 		break;
3044 	case ETHTOOL_SRXFH:
3045 		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
3046 		break;
3047 	default:
3048 		break;
3049 	}
3050 
3051 	return ret;
3052 }
3053 
3054 static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
3055 {
3056 	if (adapter->hw.mac.type < ixgbe_mac_X550)
3057 		return 16;
3058 	else
3059 		return 64;
3060 }
3061 
3062 static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
3063 {
3064 	return IXGBE_RSS_KEY_SIZE;
3065 }
3066 
3067 static u32 ixgbe_rss_indir_size(struct net_device *netdev)
3068 {
3069 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3070 
3071 	return ixgbe_rss_indir_tbl_entries(adapter);
3072 }
3073 
3074 static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
3075 {
3076 	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3077 	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3078 
3079 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3080 		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3081 
3082 	for (i = 0; i < reta_size; i++)
3083 		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
3084 }
3085 
3086 static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
3087 			  u8 *hfunc)
3088 {
3089 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3090 
3091 	if (hfunc)
3092 		*hfunc = ETH_RSS_HASH_TOP;
3093 
3094 	if (indir)
3095 		ixgbe_get_reta(adapter, indir);
3096 
3097 	if (key)
3098 		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
3099 
3100 	return 0;
3101 }
3102 
3103 static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
3104 			  const u8 *key, const u8 hfunc)
3105 {
3106 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3107 	int i;
3108 	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3109 
3110 	if (hfunc)
3111 		return -EINVAL;
3112 
3113 	/* Fill out the redirection table */
3114 	if (indir) {
3115 		int max_queues = min_t(int, adapter->num_rx_queues,
3116 				       ixgbe_rss_indir_tbl_max(adapter));
3117 
3118 		/*Allow at least 2 queues w/ SR-IOV.*/
3119 		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3120 		    (max_queues < 2))
3121 			max_queues = 2;
3122 
3123 		/* Verify user input. */
3124 		for (i = 0; i < reta_entries; i++)
3125 			if (indir[i] >= max_queues)
3126 				return -EINVAL;
3127 
3128 		for (i = 0; i < reta_entries; i++)
3129 			adapter->rss_indir_tbl[i] = indir[i];
3130 
3131 		ixgbe_store_reta(adapter);
3132 	}
3133 
3134 	/* Fill out the rss hash key */
3135 	if (key) {
3136 		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3137 		ixgbe_store_key(adapter);
3138 	}
3139 
3140 	return 0;
3141 }
3142 
3143 static int ixgbe_get_ts_info(struct net_device *dev,
3144 			     struct ethtool_ts_info *info)
3145 {
3146 	struct ixgbe_adapter *adapter = netdev_priv(dev);
3147 
3148 	/* we always support timestamping disabled */
3149 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3150 
3151 	switch (adapter->hw.mac.type) {
3152 	case ixgbe_mac_X550:
3153 	case ixgbe_mac_X550EM_x:
3154 	case ixgbe_mac_x550em_a:
3155 		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3156 		break;
3157 	case ixgbe_mac_X540:
3158 	case ixgbe_mac_82599EB:
3159 		info->rx_filters |=
3160 			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3161 			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3162 			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3163 		break;
3164 	default:
3165 		return ethtool_op_get_ts_info(dev, info);
3166 	}
3167 
3168 	info->so_timestamping =
3169 		SOF_TIMESTAMPING_TX_SOFTWARE |
3170 		SOF_TIMESTAMPING_RX_SOFTWARE |
3171 		SOF_TIMESTAMPING_SOFTWARE |
3172 		SOF_TIMESTAMPING_TX_HARDWARE |
3173 		SOF_TIMESTAMPING_RX_HARDWARE |
3174 		SOF_TIMESTAMPING_RAW_HARDWARE;
3175 
3176 	if (adapter->ptp_clock)
3177 		info->phc_index = ptp_clock_index(adapter->ptp_clock);
3178 	else
3179 		info->phc_index = -1;
3180 
3181 	info->tx_types =
3182 		BIT(HWTSTAMP_TX_OFF) |
3183 		BIT(HWTSTAMP_TX_ON);
3184 
3185 	return 0;
3186 }
3187 
3188 static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3189 {
3190 	unsigned int max_combined;
3191 	u8 tcs = adapter->hw_tcs;
3192 
3193 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3194 		/* We only support one q_vector without MSI-X */
3195 		max_combined = 1;
3196 	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3197 		/* Limit value based on the queue mask */
3198 		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3199 	} else if (tcs > 1) {
3200 		/* For DCB report channels per traffic class */
3201 		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3202 			/* 8 TC w/ 4 queues per TC */
3203 			max_combined = 4;
3204 		} else if (tcs > 4) {
3205 			/* 8 TC w/ 8 queues per TC */
3206 			max_combined = 8;
3207 		} else {
3208 			/* 4 TC w/ 16 queues per TC */
3209 			max_combined = 16;
3210 		}
3211 	} else if (adapter->atr_sample_rate) {
3212 		/* support up to 64 queues with ATR */
3213 		max_combined = IXGBE_MAX_FDIR_INDICES;
3214 	} else {
3215 		/* support up to 16 queues with RSS */
3216 		max_combined = ixgbe_max_rss_indices(adapter);
3217 	}
3218 
3219 	return max_combined;
3220 }
3221 
3222 static void ixgbe_get_channels(struct net_device *dev,
3223 			       struct ethtool_channels *ch)
3224 {
3225 	struct ixgbe_adapter *adapter = netdev_priv(dev);
3226 
3227 	/* report maximum channels */
3228 	ch->max_combined = ixgbe_max_channels(adapter);
3229 
3230 	/* report info for other vector */
3231 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3232 		ch->max_other = NON_Q_VECTORS;
3233 		ch->other_count = NON_Q_VECTORS;
3234 	}
3235 
3236 	/* record RSS queues */
3237 	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3238 
3239 	/* nothing else to report if RSS is disabled */
3240 	if (ch->combined_count == 1)
3241 		return;
3242 
3243 	/* we do not support ATR queueing if SR-IOV is enabled */
3244 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3245 		return;
3246 
3247 	/* same thing goes for being DCB enabled */
3248 	if (adapter->hw_tcs > 1)
3249 		return;
3250 
3251 	/* if ATR is disabled we can exit */
3252 	if (!adapter->atr_sample_rate)
3253 		return;
3254 
3255 	/* report flow director queues as maximum channels */
3256 	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3257 }
3258 
3259 static int ixgbe_set_channels(struct net_device *dev,
3260 			      struct ethtool_channels *ch)
3261 {
3262 	struct ixgbe_adapter *adapter = netdev_priv(dev);
3263 	unsigned int count = ch->combined_count;
3264 	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3265 
3266 	/* verify they are not requesting separate vectors */
3267 	if (!count || ch->rx_count || ch->tx_count)
3268 		return -EINVAL;
3269 
3270 	/* verify other_count has not changed */
3271 	if (ch->other_count != NON_Q_VECTORS)
3272 		return -EINVAL;
3273 
3274 	/* verify the number of channels does not exceed hardware limits */
3275 	if (count > ixgbe_max_channels(adapter))
3276 		return -EINVAL;
3277 
3278 	/* update feature limits from largest to smallest supported values */
3279 	adapter->ring_feature[RING_F_FDIR].limit = count;
3280 
3281 	/* cap RSS limit */
3282 	if (count > max_rss_indices)
3283 		count = max_rss_indices;
3284 	adapter->ring_feature[RING_F_RSS].limit = count;
3285 
3286 #ifdef IXGBE_FCOE
3287 	/* cap FCoE limit at 8 */
3288 	if (count > IXGBE_FCRETA_SIZE)
3289 		count = IXGBE_FCRETA_SIZE;
3290 	adapter->ring_feature[RING_F_FCOE].limit = count;
3291 
3292 #endif
3293 	/* use setup TC to update any traffic class queue mapping */
3294 	return ixgbe_setup_tc(dev, adapter->hw_tcs);
3295 }
3296 
3297 static int ixgbe_get_module_info(struct net_device *dev,
3298 				       struct ethtool_modinfo *modinfo)
3299 {
3300 	struct ixgbe_adapter *adapter = netdev_priv(dev);
3301 	struct ixgbe_hw *hw = &adapter->hw;
3302 	s32 status;
3303 	u8 sff8472_rev, addr_mode;
3304 	bool page_swap = false;
3305 
3306 	if (hw->phy.type == ixgbe_phy_fw)
3307 		return -ENXIO;
3308 
3309 	/* Check whether we support SFF-8472 or not */
3310 	status = hw->phy.ops.read_i2c_eeprom(hw,
3311 					     IXGBE_SFF_SFF_8472_COMP,
3312 					     &sff8472_rev);
3313 	if (status)
3314 		return -EIO;
3315 
3316 	/* addressing mode is not supported */
3317 	status = hw->phy.ops.read_i2c_eeprom(hw,
3318 					     IXGBE_SFF_SFF_8472_SWAP,
3319 					     &addr_mode);
3320 	if (status)
3321 		return -EIO;
3322 
3323 	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3324 		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3325 		page_swap = true;
3326 	}
3327 
3328 	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
3329 	    !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
3330 		/* We have a SFP, but it does not support SFF-8472 */
3331 		modinfo->type = ETH_MODULE_SFF_8079;
3332 		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3333 	} else {
3334 		/* We have a SFP which supports a revision of SFF-8472. */
3335 		modinfo->type = ETH_MODULE_SFF_8472;
3336 		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3337 	}
3338 
3339 	return 0;
3340 }
3341 
3342 static int ixgbe_get_module_eeprom(struct net_device *dev,
3343 					 struct ethtool_eeprom *ee,
3344 					 u8 *data)
3345 {
3346 	struct ixgbe_adapter *adapter = netdev_priv(dev);
3347 	struct ixgbe_hw *hw = &adapter->hw;
3348 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3349 	u8 databyte = 0xFF;
3350 	int i = 0;
3351 
3352 	if (ee->len == 0)
3353 		return -EINVAL;
3354 
3355 	if (hw->phy.type == ixgbe_phy_fw)
3356 		return -ENXIO;
3357 
3358 	for (i = ee->offset; i < ee->offset + ee->len; i++) {
3359 		/* I2C reads can take long time */
3360 		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3361 			return -EBUSY;
3362 
3363 		if (i < ETH_MODULE_SFF_8079_LEN)
3364 			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3365 		else
3366 			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3367 
3368 		if (status)
3369 			return -EIO;
3370 
3371 		data[i - ee->offset] = databyte;
3372 	}
3373 
3374 	return 0;
3375 }
3376 
3377 static const struct {
3378 	ixgbe_link_speed mac_speed;
3379 	u32 supported;
3380 } ixgbe_ls_map[] = {
3381 	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
3382 	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
3383 	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
3384 	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
3385 	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
3386 };
3387 
3388 static const struct {
3389 	u32 lp_advertised;
3390 	u32 mac_speed;
3391 } ixgbe_lp_map[] = {
3392 	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
3393 	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
3394 	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
3395 	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
3396 	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
3397 	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
3398 };
3399 
3400 static int
3401 ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
3402 {
3403 	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3404 	struct ixgbe_hw *hw = &adapter->hw;
3405 	s32 rc;
3406 	u16 i;
3407 
3408 	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3409 	if (rc)
3410 		return rc;
3411 
3412 	edata->lp_advertised = 0;
3413 	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3414 		if (info[0] & ixgbe_lp_map[i].lp_advertised)
3415 			edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
3416 	}
3417 
3418 	edata->supported = 0;
3419 	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3420 		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3421 			edata->supported |= ixgbe_ls_map[i].supported;
3422 	}
3423 
3424 	edata->advertised = 0;
3425 	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3426 		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3427 			edata->advertised |= ixgbe_ls_map[i].supported;
3428 	}
3429 
3430 	edata->eee_enabled = !!edata->advertised;
3431 	edata->tx_lpi_enabled = edata->eee_enabled;
3432 	if (edata->advertised & edata->lp_advertised)
3433 		edata->eee_active = true;
3434 
3435 	return 0;
3436 }
3437 
3438 static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
3439 {
3440 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3441 	struct ixgbe_hw *hw = &adapter->hw;
3442 
3443 	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3444 		return -EOPNOTSUPP;
3445 
3446 	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3447 		return ixgbe_get_eee_fw(adapter, edata);
3448 
3449 	return -EOPNOTSUPP;
3450 }
3451 
3452 static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
3453 {
3454 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3455 	struct ixgbe_hw *hw = &adapter->hw;
3456 	struct ethtool_eee eee_data;
3457 	s32 ret_val;
3458 
3459 	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3460 		return -EOPNOTSUPP;
3461 
3462 	memset(&eee_data, 0, sizeof(struct ethtool_eee));
3463 
3464 	ret_val = ixgbe_get_eee(netdev, &eee_data);
3465 	if (ret_val)
3466 		return ret_val;
3467 
3468 	if (eee_data.eee_enabled && !edata->eee_enabled) {
3469 		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3470 			e_err(drv, "Setting EEE tx-lpi is not supported\n");
3471 			return -EINVAL;
3472 		}
3473 
3474 		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3475 			e_err(drv,
3476 			      "Setting EEE Tx LPI timer is not supported\n");
3477 			return -EINVAL;
3478 		}
3479 
3480 		if (eee_data.advertised != edata->advertised) {
3481 			e_err(drv,
3482 			      "Setting EEE advertised speeds is not supported\n");
3483 			return -EINVAL;
3484 		}
3485 	}
3486 
3487 	if (eee_data.eee_enabled != edata->eee_enabled) {
3488 		if (edata->eee_enabled) {
3489 			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3490 			hw->phy.eee_speeds_advertised =
3491 						   hw->phy.eee_speeds_supported;
3492 		} else {
3493 			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3494 			hw->phy.eee_speeds_advertised = 0;
3495 		}
3496 
3497 		/* reset link */
3498 		if (netif_running(netdev))
3499 			ixgbe_reinit_locked(adapter);
3500 		else
3501 			ixgbe_reset(adapter);
3502 	}
3503 
3504 	return 0;
3505 }
3506 
3507 static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3508 {
3509 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3510 	u32 priv_flags = 0;
3511 
3512 	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3513 		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3514 
3515 	if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
3516 		priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
3517 
3518 	return priv_flags;
3519 }
3520 
3521 static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3522 {
3523 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3524 	unsigned int flags2 = adapter->flags2;
3525 
3526 	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3527 	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3528 		flags2 |= IXGBE_FLAG2_RX_LEGACY;
3529 
3530 	flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
3531 	if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
3532 		flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
3533 
3534 	if (flags2 != adapter->flags2) {
3535 		adapter->flags2 = flags2;
3536 
3537 		/* reset interface to repopulate queues */
3538 		if (netif_running(netdev))
3539 			ixgbe_reinit_locked(adapter);
3540 	}
3541 
3542 	return 0;
3543 }
3544 
3545 static const struct ethtool_ops ixgbe_ethtool_ops = {
3546 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
3547 	.get_drvinfo            = ixgbe_get_drvinfo,
3548 	.get_regs_len           = ixgbe_get_regs_len,
3549 	.get_regs               = ixgbe_get_regs,
3550 	.get_wol                = ixgbe_get_wol,
3551 	.set_wol                = ixgbe_set_wol,
3552 	.nway_reset             = ixgbe_nway_reset,
3553 	.get_link               = ethtool_op_get_link,
3554 	.get_eeprom_len         = ixgbe_get_eeprom_len,
3555 	.get_eeprom             = ixgbe_get_eeprom,
3556 	.set_eeprom             = ixgbe_set_eeprom,
3557 	.get_ringparam          = ixgbe_get_ringparam,
3558 	.set_ringparam          = ixgbe_set_ringparam,
3559 	.get_pause_stats	= ixgbe_get_pause_stats,
3560 	.get_pauseparam         = ixgbe_get_pauseparam,
3561 	.set_pauseparam         = ixgbe_set_pauseparam,
3562 	.get_msglevel           = ixgbe_get_msglevel,
3563 	.set_msglevel           = ixgbe_set_msglevel,
3564 	.self_test              = ixgbe_diag_test,
3565 	.get_strings            = ixgbe_get_strings,
3566 	.set_phys_id            = ixgbe_set_phys_id,
3567 	.get_sset_count         = ixgbe_get_sset_count,
3568 	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
3569 	.get_coalesce           = ixgbe_get_coalesce,
3570 	.set_coalesce           = ixgbe_set_coalesce,
3571 	.get_rxnfc		= ixgbe_get_rxnfc,
3572 	.set_rxnfc		= ixgbe_set_rxnfc,
3573 	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
3574 	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
3575 	.get_rxfh		= ixgbe_get_rxfh,
3576 	.set_rxfh		= ixgbe_set_rxfh,
3577 	.get_eee		= ixgbe_get_eee,
3578 	.set_eee		= ixgbe_set_eee,
3579 	.get_channels		= ixgbe_get_channels,
3580 	.set_channels		= ixgbe_set_channels,
3581 	.get_priv_flags		= ixgbe_get_priv_flags,
3582 	.set_priv_flags		= ixgbe_set_priv_flags,
3583 	.get_ts_info		= ixgbe_get_ts_info,
3584 	.get_module_info	= ixgbe_get_module_info,
3585 	.get_module_eeprom	= ixgbe_get_module_eeprom,
3586 	.get_link_ksettings     = ixgbe_get_link_ksettings,
3587 	.set_link_ksettings     = ixgbe_set_link_ksettings,
3588 };
3589 
3590 void ixgbe_set_ethtool_ops(struct net_device *netdev)
3591 {
3592 	netdev->ethtool_ops = &ixgbe_ethtool_ops;
3593 }
3594