xref: /openbmc/linux/drivers/net/ethernet/intel/i40e/i40e_ethtool.c (revision a03a8dbe20eff6d57aae3147577bf84b52aba4e6)
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 /* ethtool support for i40e */
28 
29 #include "i40e.h"
30 #include "i40e_diag.h"
31 
32 struct i40e_stats {
33 	char stat_string[ETH_GSTRING_LEN];
34 	int sizeof_stat;
35 	int stat_offset;
36 };
37 
38 #define I40E_STAT(_type, _name, _stat) { \
39 	.stat_string = _name, \
40 	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
41 	.stat_offset = offsetof(_type, _stat) \
42 }
43 
44 #define I40E_NETDEV_STAT(_net_stat) \
45 		I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
46 #define I40E_PF_STAT(_name, _stat) \
47 		I40E_STAT(struct i40e_pf, _name, _stat)
48 #define I40E_VSI_STAT(_name, _stat) \
49 		I40E_STAT(struct i40e_vsi, _name, _stat)
50 #define I40E_VEB_STAT(_name, _stat) \
51 		I40E_STAT(struct i40e_veb, _name, _stat)
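
/* Illustrative note (not in the original source): each helper macro above
 * expands into a struct i40e_stats initializer via I40E_STAT.  For example,
 *
 *	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes)
 *
 * expands to roughly
 *
 *	{
 *		.stat_string = "rx_bytes",
 *		.sizeof_stat = FIELD_SIZEOF(struct i40e_pf, stats.eth.rx_bytes),
 *		.stat_offset = offsetof(struct i40e_pf, stats.eth.rx_bytes),
 *	}
 *
 * so each table entry records the display name, the counter width and its
 * byte offset, which is all i40e_get_ethtool_stats() needs in order to copy
 * the value out of the owning structure.
 */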
52 
53 static const struct i40e_stats i40e_gstrings_net_stats[] = {
54 	I40E_NETDEV_STAT(rx_packets),
55 	I40E_NETDEV_STAT(tx_packets),
56 	I40E_NETDEV_STAT(rx_bytes),
57 	I40E_NETDEV_STAT(tx_bytes),
58 	I40E_NETDEV_STAT(rx_errors),
59 	I40E_NETDEV_STAT(tx_errors),
60 	I40E_NETDEV_STAT(rx_dropped),
61 	I40E_NETDEV_STAT(tx_dropped),
62 	I40E_NETDEV_STAT(collisions),
63 	I40E_NETDEV_STAT(rx_length_errors),
64 	I40E_NETDEV_STAT(rx_crc_errors),
65 };
66 
67 static const struct i40e_stats i40e_gstrings_veb_stats[] = {
68 	I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
69 	I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
70 	I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
71 	I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
72 	I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
73 	I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
74 	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
75 	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
76 	I40E_VEB_STAT("rx_discards", stats.rx_discards),
77 	I40E_VEB_STAT("tx_discards", stats.tx_discards),
78 	I40E_VEB_STAT("tx_errors", stats.tx_errors),
79 	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
80 };
81 
82 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
83 	I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
84 	I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
85 	I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
86 	I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
87 	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
88 	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
89 	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
90 };
91 
92 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
93 				 struct ethtool_rxnfc *cmd);
94 
95 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
96  * but they are separate.  This device supports Virtualization, and
97  * as such might have several netdevs supporting VMDq and FCoE going
98  * through a single port.  The NETDEV_STATs are for individual netdevs
99  * seen at the top of the stack, and the PF_STATs are for the physical
100  * function at the bottom of the stack hosting those netdevs.
101  *
102  * The PF_STATs are appended to the netdev stats only when ethtool -S
103  * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
104  */
105 static struct i40e_stats i40e_gstrings_stats[] = {
106 	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
107 	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
108 	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
109 	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
110 	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
111 	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
112 	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
113 	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
114 	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
115 	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
116 	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
117 	I40E_PF_STAT("crc_errors", stats.crc_errors),
118 	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
119 	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
120 	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
121 	I40E_PF_STAT("tx_timeout", tx_timeout_count),
122 	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
123 	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
124 	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
125 	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
126 	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
127 	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
128 	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
129 	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
130 	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
131 	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
132 	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
133 	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
134 	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
135 	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
136 	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
137 	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
138 	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
139 	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
140 	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
141 	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
142 	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
143 	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
144 	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
145 	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
146 	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
147 	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
148 	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
149 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
150 	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
151 
152 	/* LPI stats */
153 	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
154 	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
155 	I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
156 	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
157 };
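
/* Illustrative note (not in the original source): when "ethtool -S" runs on
 * the base PF netdev, i40e_get_strings() below emits the VSI-level and
 * per-queue names first and prefixes the entries of this table with "port."
 * (and VEB entries with "veb."), e.g.
 *
 *	rx_bytes            <- netdev/VSI counter
 *	tx-0.tx_packets     <- per-queue counter
 *	port.rx_bytes       <- physical-function counter from this table
 *
 * keeping the physical-port view separate from the per-netdev view described
 * in the comment above.
 */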
158 
159 #ifdef I40E_FCOE
160 static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
161 	I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
162 	I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
163 	I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
164 	I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
165 	I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
166 	I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
167 	I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
168 	I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
169 };
170 
171 #endif /* I40E_FCOE */
172 #define I40E_QUEUE_STATS_LEN(n) \
173 	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
174 	    * 2 /* Tx and Rx together */                                     \
175 	    * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
176 #define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
177 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
178 #define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
179 #ifdef I40E_FCOE
180 #define I40E_FCOE_STATS_LEN	ARRAY_SIZE(i40e_gstrings_fcoe_stats)
181 #define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
182 				 I40E_FCOE_STATS_LEN + \
183 				 I40E_MISC_STATS_LEN + \
184 				 I40E_QUEUE_STATS_LEN((n)))
185 #else
186 #define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
187 				 I40E_MISC_STATS_LEN + \
188 				 I40E_QUEUE_STATS_LEN((n)))
189 #endif /* I40E_FCOE */
190 #define I40E_PFC_STATS_LEN ( \
191 		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
192 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
193 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
194 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
195 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
196 		 / sizeof(u64))
197 #define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
198 #define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
199 				 I40E_PFC_STATS_LEN + \
200 				 I40E_VSI_STATS_LEN((n)))
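
/* Illustrative sketch (not in the original source) of how the length macros
 * above compose for the PF netdev on partition 1:
 *
 *	I40E_PF_STATS_LEN(n) = I40E_GLOBAL_STATS_LEN     port-level counters
 *	                     + I40E_PFC_STATS_LEN        five per-priority arrays,
 *	                                                 i.e. 5 * I40E_MAX_USER_PRIORITY
 *	                                                 u64s (40, assuming the usual
 *	                                                 8 user priorities)
 *	                     + I40E_VSI_STATS_LEN(n)     netdev + misc (+ FCoE) + queues
 *
 * where I40E_QUEUE_STATS_LEN(n) contributes packets and bytes for every Tx
 * and Rx ring, i.e. num_queue_pairs * 2 rings * 2 u64s per ring.
 */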
201 
202 enum i40e_ethtool_test_id {
203 	I40E_ETH_TEST_REG = 0,
204 	I40E_ETH_TEST_EEPROM,
205 	I40E_ETH_TEST_INTR,
206 	I40E_ETH_TEST_LOOPBACK,
207 	I40E_ETH_TEST_LINK,
208 };
209 
210 static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
211 	"Register test  (offline)",
212 	"Eeprom test    (offline)",
213 	"Interrupt test (offline)",
214 	"Loopback test  (offline)",
215 	"Link test   (on/offline)"
216 };
217 
218 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
219 
220 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
221 	"NPAR",
222 };
223 
224 #define I40E_PRIV_FLAGS_STR_LEN \
225 	(sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
226 
227 /**
228  * i40e_partition_setting_complaint - generic complaint for MFP restriction
229  * @pf: the PF struct
230  **/
231 static void i40e_partition_setting_complaint(struct i40e_pf *pf)
232 {
233 	dev_info(&pf->pdev->dev,
234 		 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
235 }
236 
237 /**
238  * i40e_get_settings_link_up - Get the Link settings for when link is up
239  * @hw: hw structure
240  * @ecmd: ethtool command to fill in
241  * @netdev: network interface device structure
242  *
243  **/
244 static void i40e_get_settings_link_up(struct i40e_hw *hw,
245 				      struct ethtool_cmd *ecmd,
246 				      struct net_device *netdev)
247 {
248 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
249 	u32 link_speed = hw_link_info->link_speed;
250 
251 	/* Initialize supported and advertised settings based on phy settings */
252 	switch (hw_link_info->phy_type) {
253 	case I40E_PHY_TYPE_40GBASE_CR4:
254 	case I40E_PHY_TYPE_40GBASE_CR4_CU:
255 		ecmd->supported = SUPPORTED_Autoneg |
256 				  SUPPORTED_40000baseCR4_Full;
257 		ecmd->advertising = ADVERTISED_Autoneg |
258 				    ADVERTISED_40000baseCR4_Full;
259 		break;
260 	case I40E_PHY_TYPE_XLAUI:
261 	case I40E_PHY_TYPE_XLPPI:
262 	case I40E_PHY_TYPE_40GBASE_AOC:
263 		ecmd->supported = SUPPORTED_40000baseCR4_Full;
264 		break;
265 	case I40E_PHY_TYPE_40GBASE_KR4:
266 		ecmd->supported = SUPPORTED_Autoneg |
267 				  SUPPORTED_40000baseKR4_Full;
268 		ecmd->advertising = ADVERTISED_Autoneg |
269 				    ADVERTISED_40000baseKR4_Full;
270 		break;
271 	case I40E_PHY_TYPE_40GBASE_SR4:
272 		ecmd->supported = SUPPORTED_40000baseSR4_Full;
273 		break;
274 	case I40E_PHY_TYPE_40GBASE_LR4:
275 		ecmd->supported = SUPPORTED_40000baseLR4_Full;
276 		break;
277 	case I40E_PHY_TYPE_10GBASE_KX4:
278 		ecmd->supported = SUPPORTED_Autoneg |
279 				  SUPPORTED_10000baseKX4_Full;
280 		ecmd->advertising = ADVERTISED_Autoneg |
281 				    ADVERTISED_10000baseKX4_Full;
282 		break;
283 	case I40E_PHY_TYPE_10GBASE_KR:
284 		ecmd->supported = SUPPORTED_Autoneg |
285 				  SUPPORTED_10000baseKR_Full;
286 		ecmd->advertising = ADVERTISED_Autoneg |
287 				    ADVERTISED_10000baseKR_Full;
288 		break;
289 	case I40E_PHY_TYPE_10GBASE_SR:
290 	case I40E_PHY_TYPE_10GBASE_LR:
291 	case I40E_PHY_TYPE_1000BASE_SX:
292 	case I40E_PHY_TYPE_1000BASE_LX:
293 		ecmd->supported = SUPPORTED_10000baseT_Full |
294 				  SUPPORTED_1000baseT_Full;
295 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
296 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
297 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
298 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
299 		break;
300 	case I40E_PHY_TYPE_1000BASE_KX:
301 		ecmd->supported = SUPPORTED_Autoneg |
302 				  SUPPORTED_1000baseKX_Full;
303 		ecmd->advertising = ADVERTISED_Autoneg |
304 				    ADVERTISED_1000baseKX_Full;
305 		break;
306 	case I40E_PHY_TYPE_10GBASE_T:
307 	case I40E_PHY_TYPE_1000BASE_T:
308 	case I40E_PHY_TYPE_100BASE_TX:
309 		ecmd->supported = SUPPORTED_Autoneg |
310 				  SUPPORTED_10000baseT_Full |
311 				  SUPPORTED_1000baseT_Full |
312 				  SUPPORTED_100baseT_Full;
313 		ecmd->advertising = ADVERTISED_Autoneg;
314 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
315 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
316 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
317 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
318 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
319 			ecmd->advertising |= ADVERTISED_100baseT_Full;
320 		break;
321 	case I40E_PHY_TYPE_10GBASE_CR1_CU:
322 	case I40E_PHY_TYPE_10GBASE_CR1:
323 		ecmd->supported = SUPPORTED_Autoneg |
324 				  SUPPORTED_10000baseT_Full;
325 		ecmd->advertising = ADVERTISED_Autoneg |
326 				    ADVERTISED_10000baseT_Full;
327 		break;
328 	case I40E_PHY_TYPE_XAUI:
329 	case I40E_PHY_TYPE_XFI:
330 	case I40E_PHY_TYPE_SFI:
331 	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
332 	case I40E_PHY_TYPE_10GBASE_AOC:
333 		ecmd->supported = SUPPORTED_10000baseT_Full;
334 		break;
335 	case I40E_PHY_TYPE_SGMII:
336 		ecmd->supported = SUPPORTED_Autoneg |
337 				  SUPPORTED_1000baseT_Full |
338 				  SUPPORTED_100baseT_Full;
339 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
340 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
341 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
342 			ecmd->advertising |= ADVERTISED_100baseT_Full;
343 		break;
344 	default:
345 		/* if we got here and link is up something bad is afoot */
346 		netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
347 			    hw_link_info->phy_type);
348 	}
349 
350 	/* Set speed and duplex */
351 	switch (link_speed) {
352 	case I40E_LINK_SPEED_40GB:
353 		/* need a SPEED_40000 in ethtool.h */
354 		ethtool_cmd_speed_set(ecmd, 40000);
355 		break;
356 	case I40E_LINK_SPEED_10GB:
357 		ethtool_cmd_speed_set(ecmd, SPEED_10000);
358 		break;
359 	case I40E_LINK_SPEED_1GB:
360 		ethtool_cmd_speed_set(ecmd, SPEED_1000);
361 		break;
362 	case I40E_LINK_SPEED_100MB:
363 		ethtool_cmd_speed_set(ecmd, SPEED_100);
364 		break;
365 	default:
366 		break;
367 	}
368 	ecmd->duplex = DUPLEX_FULL;
369 }
370 
371 /**
372  * i40e_get_settings_link_down - Get the Link settings for when link is down
373  * @hw: hw structure
374  * @ecmd: ethtool command to fill in
375  *
376  * Reports link settings that can be determined when link is down
377  **/
378 static void i40e_get_settings_link_down(struct i40e_hw *hw,
379 					struct ethtool_cmd *ecmd)
380 {
381 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
382 
383 	/* link is down and the driver needs to fall back on
384 	 * device ID to determine what kinds of info to display;
385 	 * it's mostly a guess that may change when link is up
386 	 */
387 	switch (hw->device_id) {
388 	case I40E_DEV_ID_QSFP_A:
389 	case I40E_DEV_ID_QSFP_B:
390 	case I40E_DEV_ID_QSFP_C:
391 		/* pluggable QSFP */
392 		ecmd->supported = SUPPORTED_40000baseSR4_Full |
393 				  SUPPORTED_40000baseCR4_Full |
394 				  SUPPORTED_40000baseLR4_Full;
395 		ecmd->advertising = ADVERTISED_40000baseSR4_Full |
396 				    ADVERTISED_40000baseCR4_Full |
397 				    ADVERTISED_40000baseLR4_Full;
398 		break;
399 	case I40E_DEV_ID_KX_B:
400 		/* backplane 40G */
401 		ecmd->supported = SUPPORTED_40000baseKR4_Full;
402 		ecmd->advertising = ADVERTISED_40000baseKR4_Full;
403 		break;
404 	case I40E_DEV_ID_KX_C:
405 		/* backplane 10G */
406 		ecmd->supported = SUPPORTED_10000baseKR_Full;
407 		ecmd->advertising = ADVERTISED_10000baseKR_Full;
408 		break;
409 	case I40E_DEV_ID_10G_BASE_T:
410 		ecmd->supported = SUPPORTED_10000baseT_Full |
411 				  SUPPORTED_1000baseT_Full |
412 				  SUPPORTED_100baseT_Full;
413 		/* Figure out what has been requested */
414 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
415 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
416 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
417 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
418 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
419 			ecmd->advertising |= ADVERTISED_100baseT_Full;
420 		break;
421 	default:
422 		/* all the rest are 10G/1G */
423 		ecmd->supported = SUPPORTED_10000baseT_Full |
424 				  SUPPORTED_1000baseT_Full;
425 		/* Figure out what has been requested */
426 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
427 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
428 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
429 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
430 		break;
431 	}
432 
433 	/* With no link, speed and duplex are unknown */
434 	ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
435 	ecmd->duplex = DUPLEX_UNKNOWN;
436 }
437 
438 /**
439  * i40e_get_settings - Get Link Speed and Duplex settings
440  * @netdev: network interface device structure
441  * @ecmd: ethtool command
442  *
443  * Reports speed/duplex settings based on media_type
444  **/
445 static int i40e_get_settings(struct net_device *netdev,
446 			     struct ethtool_cmd *ecmd)
447 {
448 	struct i40e_netdev_priv *np = netdev_priv(netdev);
449 	struct i40e_pf *pf = np->vsi->back;
450 	struct i40e_hw *hw = &pf->hw;
451 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
452 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
453 
454 	if (link_up)
455 		i40e_get_settings_link_up(hw, ecmd, netdev);
456 	else
457 		i40e_get_settings_link_down(hw, ecmd);
458 
459 	/* Now set the settings that don't rely on link being up/down */
460 
461 	/* Set autoneg settings */
462 	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
463 			  AUTONEG_ENABLE : AUTONEG_DISABLE);
464 
465 	switch (hw->phy.media_type) {
466 	case I40E_MEDIA_TYPE_BACKPLANE:
467 		ecmd->supported |= SUPPORTED_Autoneg |
468 				   SUPPORTED_Backplane;
469 		ecmd->advertising |= ADVERTISED_Autoneg |
470 				     ADVERTISED_Backplane;
471 		ecmd->port = PORT_NONE;
472 		break;
473 	case I40E_MEDIA_TYPE_BASET:
474 		ecmd->supported |= SUPPORTED_TP;
475 		ecmd->advertising |= ADVERTISED_TP;
476 		ecmd->port = PORT_TP;
477 		break;
478 	case I40E_MEDIA_TYPE_DA:
479 	case I40E_MEDIA_TYPE_CX4:
480 		ecmd->supported |= SUPPORTED_FIBRE;
481 		ecmd->advertising |= ADVERTISED_FIBRE;
482 		ecmd->port = PORT_DA;
483 		break;
484 	case I40E_MEDIA_TYPE_FIBER:
485 		ecmd->supported |= SUPPORTED_FIBRE;
486 		ecmd->port = PORT_FIBRE;
487 		break;
488 	case I40E_MEDIA_TYPE_UNKNOWN:
489 	default:
490 		ecmd->port = PORT_OTHER;
491 		break;
492 	}
493 
494 	/* Set transceiver */
495 	ecmd->transceiver = XCVR_EXTERNAL;
496 
497 	/* Set flow control settings */
498 	ecmd->supported |= SUPPORTED_Pause;
499 
500 	switch (hw->fc.requested_mode) {
501 	case I40E_FC_FULL:
502 		ecmd->advertising |= ADVERTISED_Pause;
503 		break;
504 	case I40E_FC_TX_PAUSE:
505 		ecmd->advertising |= ADVERTISED_Asym_Pause;
506 		break;
507 	case I40E_FC_RX_PAUSE:
508 		ecmd->advertising |= (ADVERTISED_Pause |
509 				      ADVERTISED_Asym_Pause);
510 		break;
511 	default:
512 		ecmd->advertising &= ~(ADVERTISED_Pause |
513 				       ADVERTISED_Asym_Pause);
514 		break;
515 	}
516 
517 	return 0;
518 }
519 
520 /**
521  * i40e_set_settings - Set Speed and Duplex
522  * @netdev: network interface device structure
523  * @ecmd: ethtool command
524  *
525  * Set speed/duplex per media_types advertised/forced
526  **/
527 static int i40e_set_settings(struct net_device *netdev,
528 			     struct ethtool_cmd *ecmd)
529 {
530 	struct i40e_netdev_priv *np = netdev_priv(netdev);
531 	struct i40e_aq_get_phy_abilities_resp abilities;
532 	struct i40e_aq_set_phy_config config;
533 	struct i40e_pf *pf = np->vsi->back;
534 	struct i40e_vsi *vsi = np->vsi;
535 	struct i40e_hw *hw = &pf->hw;
536 	struct ethtool_cmd safe_ecmd;
537 	i40e_status status = 0;
538 	bool change = false;
539 	int err = 0;
540 	u8 autoneg;
541 	u32 advertise;
542 
543 	/* Changing port settings is not supported if this isn't the
544 	 * port's controlling PF
545 	 */
546 	if (hw->partition_id != 1) {
547 		i40e_partition_setting_complaint(pf);
548 		return -EOPNOTSUPP;
549 	}
550 
551 	if (vsi != pf->vsi[pf->lan_vsi])
552 		return -EOPNOTSUPP;
553 
554 	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
555 	    hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
556 	    hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
557 	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
558 		return -EOPNOTSUPP;
559 
560 	/* get our own copy of the bits to check against */
561 	memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
562 	i40e_get_settings(netdev, &safe_ecmd);
563 
564 	/* save autoneg and speed out of ecmd */
565 	autoneg = ecmd->autoneg;
566 	advertise = ecmd->advertising;
567 
568 	/* set autoneg and speed back to what they currently are */
569 	ecmd->autoneg = safe_ecmd.autoneg;
570 	ecmd->advertising = safe_ecmd.advertising;
571 
572 	ecmd->cmd = safe_ecmd.cmd;
573 	/* If ecmd and safe_ecmd are not the same now, then they are
574 	 * trying to set something that we do not support
575 	 */
576 	if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
577 		return -EOPNOTSUPP;
578 
579 	while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
580 		usleep_range(1000, 2000);
581 
582 	/* Get the current phy config */
583 	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
584 					      NULL);
585 	if (status)
586 		return -EAGAIN;
587 
588 	/* Copy abilities to config in case autoneg is not
589 	 * set below
590 	 */
591 	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
592 	config.abilities = abilities.abilities;
593 
594 	/* Check autoneg */
595 	if (autoneg == AUTONEG_ENABLE) {
596 		/* If autoneg is not supported, return error */
597 		if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
598 			netdev_info(netdev, "Autoneg not supported on this phy\n");
599 			return -EINVAL;
600 		}
601 		/* If autoneg was not already enabled */
602 		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
603 			config.abilities = abilities.abilities |
604 					   I40E_AQ_PHY_ENABLE_AN;
605 			change = true;
606 		}
607 	} else {
608 		/* If autoneg is supported, 10GBASE_T is the only PHY that
609 		 * can have it disabled, so otherwise return an error
610 		 */
611 		if (safe_ecmd.supported & SUPPORTED_Autoneg &&
612 		    hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
613 			netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
614 			return -EINVAL;
615 		}
616 		/* If autoneg is currently enabled */
617 		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
618 			config.abilities = abilities.abilities &
619 					   ~I40E_AQ_PHY_ENABLE_AN;
620 			change = true;
621 		}
622 	}
623 
624 	if (advertise & ~safe_ecmd.supported)
625 		return -EINVAL;
626 
627 	if (advertise & ADVERTISED_100baseT_Full)
628 		config.link_speed |= I40E_LINK_SPEED_100MB;
629 	if (advertise & ADVERTISED_1000baseT_Full ||
630 	    advertise & ADVERTISED_1000baseKX_Full)
631 		config.link_speed |= I40E_LINK_SPEED_1GB;
632 	if (advertise & ADVERTISED_10000baseT_Full ||
633 	    advertise & ADVERTISED_10000baseKX4_Full ||
634 	    advertise & ADVERTISED_10000baseKR_Full)
635 		config.link_speed |= I40E_LINK_SPEED_10GB;
636 	if (advertise & ADVERTISED_40000baseKR4_Full ||
637 	    advertise & ADVERTISED_40000baseCR4_Full ||
638 	    advertise & ADVERTISED_40000baseSR4_Full ||
639 	    advertise & ADVERTISED_40000baseLR4_Full)
640 		config.link_speed |= I40E_LINK_SPEED_40GB;
641 
642 	if (change || (abilities.link_speed != config.link_speed)) {
643 		/* copy over the rest of the abilities */
644 		config.phy_type = abilities.phy_type;
645 		config.eee_capability = abilities.eee_capability;
646 		config.eeer = abilities.eeer_val;
647 		config.low_power_ctrl = abilities.d3_lpan;
648 
649 		/* save the requested speeds */
650 		hw->phy.link_info.requested_speeds = config.link_speed;
651 		/* set link and auto negotiation so changes take effect */
652 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
653 		/* If link is up put link down */
654 		if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
655 			/* Tell the OS link is going down, the link will go
656 			 * back up when fw says it is ready asynchronously
657 			 */
658 			netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
659 			netif_carrier_off(netdev);
660 			netif_tx_stop_all_queues(netdev);
661 		}
662 
663 		/* make the aq call */
664 		status = i40e_aq_set_phy_config(hw, &config, NULL);
665 		if (status) {
666 			netdev_info(netdev, "Set phy config failed with error %d.\n",
667 				    status);
668 			return -EAGAIN;
669 		}
670 
671 		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
672 		if (status)
673 			netdev_info(netdev, "Updating link info failed with error %d\n",
674 				    status);
675 
676 	} else {
677 		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
678 	}
679 
680 	return err;
681 }
682 
683 static int i40e_nway_reset(struct net_device *netdev)
684 {
685 	/* restart autonegotiation */
686 	struct i40e_netdev_priv *np = netdev_priv(netdev);
687 	struct i40e_pf *pf = np->vsi->back;
688 	struct i40e_hw *hw = &pf->hw;
689 	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
690 	i40e_status ret = 0;
691 
692 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
693 	if (ret) {
694 		netdev_info(netdev, "link restart failed, aq_err=%d\n",
695 			    pf->hw.aq.asq_last_status);
696 		return -EIO;
697 	}
698 
699 	return 0;
700 }
701 
702 /**
703  * i40e_get_pauseparam - Get Flow Control status
704  * Return the current tx/rx pause status
705  **/
706 static void i40e_get_pauseparam(struct net_device *netdev,
707 				struct ethtool_pauseparam *pause)
708 {
709 	struct i40e_netdev_priv *np = netdev_priv(netdev);
710 	struct i40e_pf *pf = np->vsi->back;
711 	struct i40e_hw *hw = &pf->hw;
712 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
713 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
714 
715 	pause->autoneg =
716 		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
717 		  AUTONEG_ENABLE : AUTONEG_DISABLE);
718 
719 	/* PFC enabled so report LFC as off */
720 	if (dcbx_cfg->pfc.pfcenable) {
721 		pause->rx_pause = 0;
722 		pause->tx_pause = 0;
723 		return;
724 	}
725 
726 	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
727 		pause->rx_pause = 1;
728 	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
729 		pause->tx_pause = 1;
730 	} else if (hw->fc.current_mode == I40E_FC_FULL) {
731 		pause->rx_pause = 1;
732 		pause->tx_pause = 1;
733 	}
734 }
735 
736 /**
737  * i40e_set_pauseparam - Set Flow Control parameter
738  * @netdev: network interface device structure
739  * @pause: requested tx/rx flow control parameters
740  **/
741 static int i40e_set_pauseparam(struct net_device *netdev,
742 			       struct ethtool_pauseparam *pause)
743 {
744 	struct i40e_netdev_priv *np = netdev_priv(netdev);
745 	struct i40e_pf *pf = np->vsi->back;
746 	struct i40e_vsi *vsi = np->vsi;
747 	struct i40e_hw *hw = &pf->hw;
748 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
749 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
750 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
751 	i40e_status status;
752 	u8 aq_failures;
753 	int err = 0;
754 
755 	/* Changing the port's flow control is not supported if this isn't the
756 	 * port's controlling PF
757 	 */
758 	if (hw->partition_id != 1) {
759 		i40e_partition_setting_complaint(pf);
760 		return -EOPNOTSUPP;
761 	}
762 
763 	if (vsi != pf->vsi[pf->lan_vsi])
764 		return -EOPNOTSUPP;
765 
766 	if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
767 	    AUTONEG_ENABLE : AUTONEG_DISABLE)) {
768 		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
769 		return -EOPNOTSUPP;
770 	}
771 
772 	/* If we have link and don't have autoneg */
773 	if (!test_bit(__I40E_DOWN, &pf->state) &&
774 	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
775 		/* Send message that it might not necessarily work */
776 		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
777 	}
778 
779 	if (dcbx_cfg->pfc.pfcenable) {
780 		netdev_info(netdev,
781 			    "Priority flow control enabled. Cannot set link flow control.\n");
782 		return -EOPNOTSUPP;
783 	}
784 
785 	if (pause->rx_pause && pause->tx_pause)
786 		hw->fc.requested_mode = I40E_FC_FULL;
787 	else if (pause->rx_pause && !pause->tx_pause)
788 		hw->fc.requested_mode = I40E_FC_RX_PAUSE;
789 	else if (!pause->rx_pause && pause->tx_pause)
790 		hw->fc.requested_mode = I40E_FC_TX_PAUSE;
791 	else if (!pause->rx_pause && !pause->tx_pause)
792 		hw->fc.requested_mode = I40E_FC_NONE;
793 	else
794 		 return -EINVAL;
795 
796 	/* Tell the OS link is going down, the link will go back up when fw
797 	 * says it is ready asynchronously
798 	 */
799 	netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
800 	netif_carrier_off(netdev);
801 	netif_tx_stop_all_queues(netdev);
802 
803 	/* Set the fc mode and only restart autoneg if link is up */
804 	status = i40e_set_fc(hw, &aq_failures, link_up);
805 
806 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
807 		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
808 			    status, hw->aq.asq_last_status);
809 		err = -EAGAIN;
810 	}
811 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
812 		netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
813 			    status, hw->aq.asq_last_status);
814 		err = -EAGAIN;
815 	}
816 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
817 		netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
818 			    status, hw->aq.asq_last_status);
819 		err = -EAGAIN;
820 	}
821 
822 	if (!test_bit(__I40E_DOWN, &pf->state)) {
823 		/* Give it a little more time to try to come back */
824 		msleep(75);
825 		if (!test_bit(__I40E_DOWN, &pf->state))
826 			return i40e_nway_reset(netdev);
827 	}
828 
829 	return err;
830 }
831 
832 static u32 i40e_get_msglevel(struct net_device *netdev)
833 {
834 	struct i40e_netdev_priv *np = netdev_priv(netdev);
835 	struct i40e_pf *pf = np->vsi->back;
836 
837 	return pf->msg_enable;
838 }
839 
840 static void i40e_set_msglevel(struct net_device *netdev, u32 data)
841 {
842 	struct i40e_netdev_priv *np = netdev_priv(netdev);
843 	struct i40e_pf *pf = np->vsi->back;
844 
845 	if (I40E_DEBUG_USER & data)
846 		pf->hw.debug_mask = data;
847 	pf->msg_enable = data;
848 }
849 
850 static int i40e_get_regs_len(struct net_device *netdev)
851 {
852 	int reg_count = 0;
853 	int i;
854 
855 	for (i = 0; i40e_reg_list[i].offset != 0; i++)
856 		reg_count += i40e_reg_list[i].elements;
857 
858 	return reg_count * sizeof(u32);
859 }
860 
861 static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
862 			  void *p)
863 {
864 	struct i40e_netdev_priv *np = netdev_priv(netdev);
865 	struct i40e_pf *pf = np->vsi->back;
866 	struct i40e_hw *hw = &pf->hw;
867 	u32 *reg_buf = p;
868 	int i, j, ri;
869 	u32 reg;
870 
871 	/* Tell ethtool which driver-version-specific regs output we have.
872 	 *
873 	 * At some point, if we have ethtool doing special formatting of
874 	 * this data, it will rely on this version number to know how to
875 	 * interpret things.  Hence, this needs to be updated if/when the
876 	 * diags register table is changed.
877 	 */
878 	regs->version = 1;
879 
880 	/* loop through the diags reg table for what to print */
881 	ri = 0;
882 	for (i = 0; i40e_reg_list[i].offset != 0; i++) {
883 		for (j = 0; j < i40e_reg_list[i].elements; j++) {
884 			reg = i40e_reg_list[i].offset
885 				+ (j * i40e_reg_list[i].stride);
886 			reg_buf[ri++] = rd32(hw, reg);
887 		}
888 	}
889 
890 }
891 
892 static int i40e_get_eeprom(struct net_device *netdev,
893 			   struct ethtool_eeprom *eeprom, u8 *bytes)
894 {
895 	struct i40e_netdev_priv *np = netdev_priv(netdev);
896 	struct i40e_hw *hw = &np->vsi->back->hw;
897 	struct i40e_pf *pf = np->vsi->back;
898 	int ret_val = 0, len, offset;
899 	u8 *eeprom_buff;
900 	u16 i, sectors;
901 	bool last;
902 	u32 magic;
903 
904 #define I40E_NVM_SECTOR_SIZE  4096
905 	if (eeprom->len == 0)
906 		return -EINVAL;
907 
908 	/* check for NVMUpdate access method */
909 	magic = hw->vendor_id | (hw->device_id << 16);
910 	if (eeprom->magic && eeprom->magic != magic) {
911 		struct i40e_nvm_access *cmd;
912 		int errno;
913 
914 		/* make sure it is the right magic for NVMUpdate */
915 		if ((eeprom->magic >> 16) != hw->device_id)
916 			return -EINVAL;
917 
918 		cmd = (struct i40e_nvm_access *)eeprom;
919 		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
920 		if (ret_val &&
921 		    ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
922 		     (hw->debug_mask & I40E_DEBUG_NVM)))
923 			dev_info(&pf->pdev->dev,
924 				 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
925 				 ret_val, hw->aq.asq_last_status, errno,
926 				 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
927 				 cmd->offset, cmd->data_size);
928 
929 		return errno;
930 	}
931 
932 	/* normal ethtool get_eeprom support */
933 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
934 
935 	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
936 	if (!eeprom_buff)
937 		return -ENOMEM;
938 
939 	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
940 	if (ret_val) {
941 		dev_info(&pf->pdev->dev,
942 			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
943 			 ret_val, hw->aq.asq_last_status);
944 		goto free_buff;
945 	}
946 
947 	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
948 	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
949 	len = I40E_NVM_SECTOR_SIZE;
950 	last = false;
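	/* Illustrative note (not in the original source): the request is read
	 * in I40E_NVM_SECTOR_SIZE (4096 byte) chunks; e.g. a 10000 byte
	 * request becomes three AQ reads of 4096, 4096 and 1808 bytes, with
	 * 'last' set only on the final chunk.
	 */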
951 	for (i = 0; i < sectors; i++) {
952 		if (i == (sectors - 1)) {
953 			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
954 			last = true;
955 		}
956 		offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
957 		ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
958 				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
959 				last, NULL);
960 		if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
961 			dev_info(&pf->pdev->dev,
962 				 "read NVM failed, invalid offset 0x%x\n",
963 				 offset);
964 			break;
965 		} else if (ret_val &&
966 			   hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
967 			dev_info(&pf->pdev->dev,
968 				 "read NVM failed, access, offset 0x%x\n",
969 				 offset);
970 			break;
971 		} else if (ret_val) {
972 			dev_info(&pf->pdev->dev,
973 				 "read NVM failed offset %d err=%d status=0x%x\n",
974 				 offset, ret_val, hw->aq.asq_last_status);
975 			break;
976 		}
977 	}
978 
979 	i40e_release_nvm(hw);
980 	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
981 free_buff:
982 	kfree(eeprom_buff);
983 	return ret_val;
984 }
985 
986 static int i40e_get_eeprom_len(struct net_device *netdev)
987 {
988 	struct i40e_netdev_priv *np = netdev_priv(netdev);
989 	struct i40e_hw *hw = &np->vsi->back->hw;
990 	u32 val;
991 
992 	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
993 		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
994 		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
995 	/* register returns value in power of 2, 64Kbyte chunks. */
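	/* Illustrative example (not in the original source): an FL_SIZE field
	 * of 3 reports (64 * 1024) << 3 = 512KB of flash.
	 */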
996 	val = (64 * 1024) * (1 << val);
997 	return val;
998 }
999 
1000 static int i40e_set_eeprom(struct net_device *netdev,
1001 			   struct ethtool_eeprom *eeprom, u8 *bytes)
1002 {
1003 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1004 	struct i40e_hw *hw = &np->vsi->back->hw;
1005 	struct i40e_pf *pf = np->vsi->back;
1006 	struct i40e_nvm_access *cmd;
1007 	int ret_val = 0;
1008 	int errno;
1009 	u32 magic;
1010 
1011 	/* normal ethtool set_eeprom is not supported */
1012 	magic = hw->vendor_id | (hw->device_id << 16);
1013 	if (eeprom->magic == magic)
1014 		return -EOPNOTSUPP;
1015 
1016 	/* check for NVMUpdate access method */
1017 	if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
1018 		return -EINVAL;
1019 
1020 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
1021 	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
1022 		return -EBUSY;
1023 
1024 	cmd = (struct i40e_nvm_access *)eeprom;
1025 	ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
1026 	if (ret_val &&
1027 	    ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
1028 	      hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
1029 	     (hw->debug_mask & I40E_DEBUG_NVM)))
1030 		dev_info(&pf->pdev->dev,
1031 			 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
1032 			 ret_val, hw->aq.asq_last_status, errno,
1033 			 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
1034 			 cmd->offset, cmd->data_size);
1035 
1036 	return errno;
1037 }
1038 
1039 static void i40e_get_drvinfo(struct net_device *netdev,
1040 			     struct ethtool_drvinfo *drvinfo)
1041 {
1042 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1043 	struct i40e_vsi *vsi = np->vsi;
1044 	struct i40e_pf *pf = vsi->back;
1045 
1046 	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
1047 	strlcpy(drvinfo->version, i40e_driver_version_str,
1048 		sizeof(drvinfo->version));
1049 	strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
1050 		sizeof(drvinfo->fw_version));
1051 	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
1052 		sizeof(drvinfo->bus_info));
1053 	drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
1054 }
1055 
1056 static void i40e_get_ringparam(struct net_device *netdev,
1057 			       struct ethtool_ringparam *ring)
1058 {
1059 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1060 	struct i40e_pf *pf = np->vsi->back;
1061 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
1062 
1063 	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1064 	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1065 	ring->rx_mini_max_pending = 0;
1066 	ring->rx_jumbo_max_pending = 0;
1067 	ring->rx_pending = vsi->rx_rings[0]->count;
1068 	ring->tx_pending = vsi->tx_rings[0]->count;
1069 	ring->rx_mini_pending = 0;
1070 	ring->rx_jumbo_pending = 0;
1071 }
1072 
1073 static int i40e_set_ringparam(struct net_device *netdev,
1074 			      struct ethtool_ringparam *ring)
1075 {
1076 	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
1077 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1078 	struct i40e_vsi *vsi = np->vsi;
1079 	struct i40e_pf *pf = vsi->back;
1080 	u32 new_rx_count, new_tx_count;
1081 	int i, err = 0;
1082 
1083 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1084 		return -EINVAL;
1085 
1086 	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1087 	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
1088 	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1089 	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
1090 		netdev_info(netdev,
1091 			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
1092 			    ring->tx_pending, ring->rx_pending,
1093 			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
1094 		return -EINVAL;
1095 	}
1096 
1097 	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1098 	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1099 
1100 	/* if nothing to do return success */
1101 	if ((new_tx_count == vsi->tx_rings[0]->count) &&
1102 	    (new_rx_count == vsi->rx_rings[0]->count))
1103 		return 0;
1104 
1105 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
1106 		usleep_range(1000, 2000);
1107 
1108 	if (!netif_running(vsi->netdev)) {
1109 		/* simple case - set for the next time the netdev is started */
1110 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1111 			vsi->tx_rings[i]->count = new_tx_count;
1112 			vsi->rx_rings[i]->count = new_rx_count;
1113 		}
1114 		goto done;
1115 	}
1116 
1117 	/* We can't just free everything and then setup again,
1118 	 * because the ISRs in MSI-X mode get passed pointers
1119 	 * to the Tx and Rx ring structs.
1120 	 */
1121 
1122 	/* alloc updated Tx resources */
1123 	if (new_tx_count != vsi->tx_rings[0]->count) {
1124 		netdev_info(netdev,
1125 			    "Changing Tx descriptor count from %d to %d.\n",
1126 			    vsi->tx_rings[0]->count, new_tx_count);
1127 		tx_rings = kcalloc(vsi->alloc_queue_pairs,
1128 				   sizeof(struct i40e_ring), GFP_KERNEL);
1129 		if (!tx_rings) {
1130 			err = -ENOMEM;
1131 			goto done;
1132 		}
1133 
1134 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1135 			/* clone ring and setup updated count */
1136 			tx_rings[i] = *vsi->tx_rings[i];
1137 			tx_rings[i].count = new_tx_count;
1138 			err = i40e_setup_tx_descriptors(&tx_rings[i]);
1139 			if (err) {
1140 				while (i) {
1141 					i--;
1142 					i40e_free_tx_resources(&tx_rings[i]);
1143 				}
1144 				kfree(tx_rings);
1145 				tx_rings = NULL;
1146 
1147 				goto done;
1148 			}
1149 		}
1150 	}
1151 
1152 	/* alloc updated Rx resources */
1153 	if (new_rx_count != vsi->rx_rings[0]->count) {
1154 		netdev_info(netdev,
1155 			    "Changing Rx descriptor count from %d to %d\n",
1156 			    vsi->rx_rings[0]->count, new_rx_count);
1157 		rx_rings = kcalloc(vsi->alloc_queue_pairs,
1158 				   sizeof(struct i40e_ring), GFP_KERNEL);
1159 		if (!rx_rings) {
1160 			err = -ENOMEM;
1161 			goto free_tx;
1162 		}
1163 
1164 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1165 			/* clone ring and setup updated count */
1166 			rx_rings[i] = *vsi->rx_rings[i];
1167 			rx_rings[i].count = new_rx_count;
1168 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
1169 			if (err) {
1170 				while (i) {
1171 					i--;
1172 					i40e_free_rx_resources(&rx_rings[i]);
1173 				}
1174 				kfree(rx_rings);
1175 				rx_rings = NULL;
1176 
1177 				goto free_tx;
1178 			}
1179 		}
1180 	}
1181 
1182 	/* Bring interface down, copy in the new ring info,
1183 	 * then restore the interface
1184 	 */
1185 	i40e_down(vsi);
1186 
1187 	if (tx_rings) {
1188 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1189 			i40e_free_tx_resources(vsi->tx_rings[i]);
1190 			*vsi->tx_rings[i] = tx_rings[i];
1191 		}
1192 		kfree(tx_rings);
1193 		tx_rings = NULL;
1194 	}
1195 
1196 	if (rx_rings) {
1197 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1198 			i40e_free_rx_resources(vsi->rx_rings[i]);
1199 			*vsi->rx_rings[i] = rx_rings[i];
1200 		}
1201 		kfree(rx_rings);
1202 		rx_rings = NULL;
1203 	}
1204 
1205 	i40e_up(vsi);
1206 
1207 free_tx:
1208 	/* error cleanup if the Rx allocations failed after getting Tx */
1209 	if (tx_rings) {
1210 		for (i = 0; i < vsi->num_queue_pairs; i++)
1211 			i40e_free_tx_resources(&tx_rings[i]);
1212 		kfree(tx_rings);
1213 		tx_rings = NULL;
1214 	}
1215 
1216 done:
1217 	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
1218 
1219 	return err;
1220 }
1221 
1222 static int i40e_get_sset_count(struct net_device *netdev, int sset)
1223 {
1224 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1225 	struct i40e_vsi *vsi = np->vsi;
1226 	struct i40e_pf *pf = vsi->back;
1227 
1228 	switch (sset) {
1229 	case ETH_SS_TEST:
1230 		return I40E_TEST_LEN;
1231 	case ETH_SS_STATS:
1232 		if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
1233 			int len = I40E_PF_STATS_LEN(netdev);
1234 
1235 			if (pf->lan_veb != I40E_NO_VEB)
1236 				len += I40E_VEB_STATS_LEN;
1237 			return len;
1238 		} else {
1239 			return I40E_VSI_STATS_LEN(netdev);
1240 		}
1241 	case ETH_SS_PRIV_FLAGS:
1242 		return I40E_PRIV_FLAGS_STR_LEN;
1243 	default:
1244 		return -EOPNOTSUPP;
1245 	}
1246 }
1247 
1248 static void i40e_get_ethtool_stats(struct net_device *netdev,
1249 				   struct ethtool_stats *stats, u64 *data)
1250 {
1251 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1252 	struct i40e_ring *tx_ring, *rx_ring;
1253 	struct i40e_vsi *vsi = np->vsi;
1254 	struct i40e_pf *pf = vsi->back;
1255 	int i = 0;
1256 	char *p;
1257 	int j;
1258 	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
1259 	unsigned int start;
1260 
1261 	i40e_update_stats(vsi);
1262 
1263 	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
1264 		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
1265 		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
1266 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1267 	}
1268 	for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
1269 		p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
1270 		data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
1271 			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1272 	}
1273 #ifdef I40E_FCOE
1274 	for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
1275 		p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
1276 		data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
1277 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1278 	}
1279 #endif
1280 	rcu_read_lock();
1281 	for (j = 0; j < vsi->num_queue_pairs; j++) {
1282 		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
1283 
1284 		if (!tx_ring)
1285 			continue;
1286 
1287 		/* process Tx ring statistics */
1288 		do {
1289 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
1290 			data[i] = tx_ring->stats.packets;
1291 			data[i + 1] = tx_ring->stats.bytes;
1292 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
1293 		i += 2;
1294 
1295 		/* Rx ring is the 2nd half of the queue pair */
1296 		rx_ring = &tx_ring[1];
1297 		do {
1298 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
1299 			data[i] = rx_ring->stats.packets;
1300 			data[i + 1] = rx_ring->stats.bytes;
1301 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
1302 		i += 2;
1303 	}
1304 	rcu_read_unlock();
1305 	if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
1306 		return;
1307 
1308 	if (pf->lan_veb != I40E_NO_VEB) {
1309 		struct i40e_veb *veb = pf->veb[pf->lan_veb];
1310 		for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
1311 			p = (char *)veb;
1312 			p += i40e_gstrings_veb_stats[j].stat_offset;
1313 			data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
1314 				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1315 		}
1316 	}
1317 	for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
1318 		p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
1319 		data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
1320 			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1321 	}
1322 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
1323 		data[i++] = pf->stats.priority_xon_tx[j];
1324 		data[i++] = pf->stats.priority_xoff_tx[j];
1325 	}
1326 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
1327 		data[i++] = pf->stats.priority_xon_rx[j];
1328 		data[i++] = pf->stats.priority_xoff_rx[j];
1329 	}
1330 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
1331 		data[i++] = pf->stats.priority_xon_2_xoff[j];
1332 }
1333 
1334 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
1335 			     u8 *data)
1336 {
1337 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1338 	struct i40e_vsi *vsi = np->vsi;
1339 	struct i40e_pf *pf = vsi->back;
1340 	char *p = (char *)data;
1341 	int i;
1342 
1343 	switch (stringset) {
1344 	case ETH_SS_TEST:
1345 		for (i = 0; i < I40E_TEST_LEN; i++) {
1346 			memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
1347 			data += ETH_GSTRING_LEN;
1348 		}
1349 		break;
1350 	case ETH_SS_STATS:
1351 		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
1352 			snprintf(p, ETH_GSTRING_LEN, "%s",
1353 				 i40e_gstrings_net_stats[i].stat_string);
1354 			p += ETH_GSTRING_LEN;
1355 		}
1356 		for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
1357 			snprintf(p, ETH_GSTRING_LEN, "%s",
1358 				 i40e_gstrings_misc_stats[i].stat_string);
1359 			p += ETH_GSTRING_LEN;
1360 		}
1361 #ifdef I40E_FCOE
1362 		for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
1363 			snprintf(p, ETH_GSTRING_LEN, "%s",
1364 				 i40e_gstrings_fcoe_stats[i].stat_string);
1365 			p += ETH_GSTRING_LEN;
1366 		}
1367 #endif
1368 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1369 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
1370 			p += ETH_GSTRING_LEN;
1371 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
1372 			p += ETH_GSTRING_LEN;
1373 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
1374 			p += ETH_GSTRING_LEN;
1375 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
1376 			p += ETH_GSTRING_LEN;
1377 		}
1378 		if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
1379 			return;
1380 
1381 		if (pf->lan_veb != I40E_NO_VEB) {
1382 			for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
1383 				snprintf(p, ETH_GSTRING_LEN, "veb.%s",
1384 					i40e_gstrings_veb_stats[i].stat_string);
1385 				p += ETH_GSTRING_LEN;
1386 			}
1387 		}
1388 		for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
1389 			snprintf(p, ETH_GSTRING_LEN, "port.%s",
1390 				 i40e_gstrings_stats[i].stat_string);
1391 			p += ETH_GSTRING_LEN;
1392 		}
1393 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1394 			snprintf(p, ETH_GSTRING_LEN,
1395 				 "port.tx_priority_%u_xon", i);
1396 			p += ETH_GSTRING_LEN;
1397 			snprintf(p, ETH_GSTRING_LEN,
1398 				 "port.tx_priority_%u_xoff", i);
1399 			p += ETH_GSTRING_LEN;
1400 		}
1401 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1402 			snprintf(p, ETH_GSTRING_LEN,
1403 				 "port.rx_priority_%u_xon", i);
1404 			p += ETH_GSTRING_LEN;
1405 			snprintf(p, ETH_GSTRING_LEN,
1406 				 "port.rx_priority_%u_xoff", i);
1407 			p += ETH_GSTRING_LEN;
1408 		}
1409 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1410 			snprintf(p, ETH_GSTRING_LEN,
1411 				 "port.rx_priority_%u_xon_2_xoff", i);
1412 			p += ETH_GSTRING_LEN;
1413 		}
1414 		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
1415 		break;
1416 	case ETH_SS_PRIV_FLAGS:
1417 		for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
1418 			memcpy(data, i40e_priv_flags_strings[i],
1419 			       ETH_GSTRING_LEN);
1420 			data += ETH_GSTRING_LEN;
1421 		}
1422 		break;
1423 	default:
1424 		break;
1425 	}
1426 }
1427 
1428 static int i40e_get_ts_info(struct net_device *dev,
1429 			    struct ethtool_ts_info *info)
1430 {
1431 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
1432 
1433 	/* only report HW timestamping if PTP is enabled */
1434 	if (!(pf->flags & I40E_FLAG_PTP))
1435 		return ethtool_op_get_ts_info(dev, info);
1436 
1437 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1438 				SOF_TIMESTAMPING_RX_SOFTWARE |
1439 				SOF_TIMESTAMPING_SOFTWARE |
1440 				SOF_TIMESTAMPING_TX_HARDWARE |
1441 				SOF_TIMESTAMPING_RX_HARDWARE |
1442 				SOF_TIMESTAMPING_RAW_HARDWARE;
1443 
1444 	if (pf->ptp_clock)
1445 		info->phc_index = ptp_clock_index(pf->ptp_clock);
1446 	else
1447 		info->phc_index = -1;
1448 
1449 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1450 
1451 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1452 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1453 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1454 			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
1455 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1456 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1457 			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
1458 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1459 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1460 			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
1461 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1462 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1463 
1464 	return 0;
1465 }
1466 
1467 static int i40e_link_test(struct net_device *netdev, u64 *data)
1468 {
1469 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1470 	struct i40e_pf *pf = np->vsi->back;
1471 
1472 	netif_info(pf, hw, netdev, "link test\n");
1473 	if (i40e_get_link_status(&pf->hw))
1474 		*data = 0;
1475 	else
1476 		*data = 1;
1477 
1478 	return *data;
1479 }
1480 
1481 static int i40e_reg_test(struct net_device *netdev, u64 *data)
1482 {
1483 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1484 	struct i40e_pf *pf = np->vsi->back;
1485 
1486 	netif_info(pf, hw, netdev, "register test\n");
1487 	*data = i40e_diag_reg_test(&pf->hw);
1488 
1489 	return *data;
1490 }
1491 
1492 static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
1493 {
1494 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1495 	struct i40e_pf *pf = np->vsi->back;
1496 
1497 	netif_info(pf, hw, netdev, "eeprom test\n");
1498 	*data = i40e_diag_eeprom_test(&pf->hw);
1499 
1500 	/* forcebly clear the NVM Update state machine */
1501 	/* forcibly clear the NVM Update state machine */
1502 
1503 	return *data;
1504 }
1505 
1506 static int i40e_intr_test(struct net_device *netdev, u64 *data)
1507 {
1508 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1509 	struct i40e_pf *pf = np->vsi->back;
1510 	u16 swc_old = pf->sw_int_count;
1511 
1512 	netif_info(pf, hw, netdev, "interrupt test\n");
1513 	wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
1514 	     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
1515 	      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1516 	      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
1517 	      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
1518 	      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
1519 	usleep_range(1000, 2000);
1520 	*data = (swc_old == pf->sw_int_count);
1521 
1522 	return *data;
1523 }
1524 
1525 static int i40e_loopback_test(struct net_device *netdev, u64 *data)
1526 {
1527 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1528 	struct i40e_pf *pf = np->vsi->back;
1529 
1530 	netif_info(pf, hw, netdev, "loopback test not implemented\n");
1531 	*data = 0;
1532 
1533 	return *data;
1534 }
1535 
1536 static void i40e_diag_test(struct net_device *netdev,
1537 			   struct ethtool_test *eth_test, u64 *data)
1538 {
1539 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1540 	bool if_running = netif_running(netdev);
1541 	struct i40e_pf *pf = np->vsi->back;
1542 
1543 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1544 		/* Offline tests */
1545 		netif_info(pf, drv, netdev, "offline testing starting\n");
1546 
1547 		set_bit(__I40E_TESTING, &pf->state);
1548 		/* If the device is online then take it offline */
1549 		if (if_running)
1550 			/* indicate we're in test mode */
1551 			dev_close(netdev);
1552 		else
1553 			i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
1554 
1555 		/* Link test performed before hardware reset
1556 		 * so autoneg doesn't interfere with test result
1557 		 */
1558 		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
1559 			eth_test->flags |= ETH_TEST_FL_FAILED;
1560 
1561 		if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
1562 			eth_test->flags |= ETH_TEST_FL_FAILED;
1563 
1564 		if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
1565 			eth_test->flags |= ETH_TEST_FL_FAILED;
1566 
1567 		if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
1568 			eth_test->flags |= ETH_TEST_FL_FAILED;
1569 
1570 		/* run reg test last, a reset is required after it */
1571 		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
1572 			eth_test->flags |= ETH_TEST_FL_FAILED;
1573 
1574 		clear_bit(__I40E_TESTING, &pf->state);
1575 		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
1576 
1577 		if (if_running)
1578 			dev_open(netdev);
1579 	} else {
1580 		/* Online tests */
1581 		netif_info(pf, drv, netdev, "online testing starting\n");
1582 
1583 		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
1584 			eth_test->flags |= ETH_TEST_FL_FAILED;
1585 
1586 		/* Offline only tests, not run in online; pass by default */
1587 		data[I40E_ETH_TEST_REG] = 0;
1588 		data[I40E_ETH_TEST_EEPROM] = 0;
1589 		data[I40E_ETH_TEST_INTR] = 0;
1590 		data[I40E_ETH_TEST_LOOPBACK] = 0;
1591 	}
1592 
1593 	netif_info(pf, drv, netdev, "testing finished\n");
1594 }
1595 
1596 static void i40e_get_wol(struct net_device *netdev,
1597 			 struct ethtool_wolinfo *wol)
1598 {
1599 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1600 	struct i40e_pf *pf = np->vsi->back;
1601 	struct i40e_hw *hw = &pf->hw;
1602 	u16 wol_nvm_bits;
1603 
1604 	/* NVM bit on means WoL disabled for the port */
1605 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1606 	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
1607 		wol->supported = 0;
1608 		wol->wolopts = 0;
1609 	} else {
1610 		wol->supported = WAKE_MAGIC;
1611 		wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
1612 	}
1613 }
1614 
1615 /**
1616  * i40e_set_wol - set the WakeOnLAN configuration
1617  * @netdev: the netdev in question
1618  * @wol: the ethtool WoL setting data
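 *
 * Only magic-packet wake is supported (e.g. "ethtool -s <dev> wol g" to
 * enable it, "wol d" to disable), and only on the controlling partition's
 * LAN VSI.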
1619  **/
1620 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1621 {
1622 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1623 	struct i40e_pf *pf = np->vsi->back;
1624 	struct i40e_vsi *vsi = np->vsi;
1625 	struct i40e_hw *hw = &pf->hw;
1626 	u16 wol_nvm_bits;
1627 
1628 	/* WoL not supported if this isn't the controlling PF on the port */
1629 	if (hw->partition_id != 1) {
1630 		i40e_partition_setting_complaint(pf);
1631 		return -EOPNOTSUPP;
1632 	}
1633 
1634 	if (vsi != pf->vsi[pf->lan_vsi])
1635 		return -EOPNOTSUPP;
1636 
1637 	/* NVM bit on means WoL disabled for the port */
1638 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1639 	if (((1 << hw->port) & wol_nvm_bits))
1640 		return -EOPNOTSUPP;
1641 
1642 	/* only magic packet is supported */
1643 	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
1644 		return -EOPNOTSUPP;
1645 
1646 	/* is this a new value? */
1647 	if (pf->wol_en != !!wol->wolopts) {
1648 		pf->wol_en = !!wol->wolopts;
1649 		device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
1650 	}
1651 
1652 	return 0;
1653 }
1654 
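/**
 * i40e_set_phys_id - blink the port identify LED (e.g. "ethtool -p <dev>")
 * @netdev: network interface device structure
 * @state: LED state requested by the ethtool core
 *
 * Saves the current LED state when identification starts, drives the LED
 * on or off as requested while active, and restores the saved state when
 * identification ends.  Returns the blink frequency for ETHTOOL_ID_ACTIVE
 * and 0 otherwise.
 **/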
1655 static int i40e_set_phys_id(struct net_device *netdev,
1656 			    enum ethtool_phys_id_state state)
1657 {
1658 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1659 	struct i40e_pf *pf = np->vsi->back;
1660 	struct i40e_hw *hw = &pf->hw;
1661 	int blink_freq = 2;
1662 
1663 	switch (state) {
1664 	case ETHTOOL_ID_ACTIVE:
1665 		pf->led_status = i40e_led_get(hw);
1666 		return blink_freq;
1667 	case ETHTOOL_ID_ON:
1668 		i40e_led_set(hw, 0xF, false);
1669 		break;
1670 	case ETHTOOL_ID_OFF:
1671 		i40e_led_set(hw, 0x0, false);
1672 		break;
1673 	case ETHTOOL_ID_INACTIVE:
1674 		i40e_led_set(hw, pf->led_status, false);
1675 		break;
1676 	default:
1677 		break;
1678 	}
1679 
1680 	return 0;
1681 }
1682 
1683 /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
1684  * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us, and also
1685  * 125us (8000 interrupts per second) == ITR(62)
1686  */
1687 
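/**
 * i40e_get_coalesce - report the current interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure to fill in
 *
 * Reports the per-VSI work limit, whether adaptive (dynamic) ITR is in use
 * for Rx and Tx, and the Rx/Tx ITR values in microseconds with the dynamic
 * flag masked off.
 **/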
1688 static int i40e_get_coalesce(struct net_device *netdev,
1689 			     struct ethtool_coalesce *ec)
1690 {
1691 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1692 	struct i40e_vsi *vsi = np->vsi;
1693 
1694 	ec->tx_max_coalesced_frames_irq = vsi->work_limit;
1695 	ec->rx_max_coalesced_frames_irq = vsi->work_limit;
1696 
1697 	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
1698 		ec->use_adaptive_rx_coalesce = 1;
1699 
1700 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1701 		ec->use_adaptive_tx_coalesce = 1;
1702 
1703 	ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
1704 	ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
1705 
1706 	return 0;
1707 }
1708 
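/**
 * i40e_set_coalesce - change the interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: requested coalesce parameters (e.g. from "ethtool -C <dev> rx-usecs N")
 *
 * Validates the requested Rx/Tx values against the supported ITR range
 * (0 requests a complete disable, together with adaptive-rx/tx off),
 * applies the adaptive flags, and programs the new ITR values into every
 * queue vector of the VSI.
 **/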
1709 static int i40e_set_coalesce(struct net_device *netdev,
1710 			     struct ethtool_coalesce *ec)
1711 {
1712 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1713 	struct i40e_q_vector *q_vector;
1714 	struct i40e_vsi *vsi = np->vsi;
1715 	struct i40e_pf *pf = vsi->back;
1716 	struct i40e_hw *hw = &pf->hw;
1717 	u16 vector;
1718 	int i;
1719 
1720 	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
1721 		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
1722 
1723 	vector = vsi->base_vector;
1724 	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
1725 	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
1726 		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1727 	} else if (ec->rx_coalesce_usecs == 0) {
1728 		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1729 		if (ec->use_adaptive_rx_coalesce)
1730 			netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
1731 	} else {
1732 		netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
1733 		return -EINVAL;
1734 	}
1735 
1736 	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
1737 	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
1738 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1739 	} else if (ec->tx_coalesce_usecs == 0) {
1740 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1741 		if (ec->use_adaptive_tx_coalesce)
1742 			netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
1743 	} else {
1744 		netif_info(pf, drv, netdev,
1745 			   "Invalid value, tx-usecs range is 0-8160\n");
1746 		return -EINVAL;
1747 	}
1748 
1749 	if (ec->use_adaptive_rx_coalesce)
1750 		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
1751 	else
1752 		vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
1753 
1754 	if (ec->use_adaptive_tx_coalesce)
1755 		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
1756 	else
1757 		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
1758 
1759 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
1760 		q_vector = vsi->q_vectors[i];
1761 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
1762 		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
1763 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
1764 		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
1765 		i40e_flush(hw);
1766 	}
1767 
1768 	return 0;
1769 }
1770 
1771 /**
1772  * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
1773  * @pf: pointer to the physical function struct
1774  * @cmd: ethtool rxnfc command
1775  *
1776  * Returns Success if the flow is supported, else Invalid Input.
1777  **/
1778 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
1779 {
1780 	cmd->data = 0;
1781 
1782 	if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
1783 		cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
1784 		cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
1785 		return 0;
1786 	}
1787 	/* Report default options for RSS on i40e */
1788 	switch (cmd->flow_type) {
1789 	case TCP_V4_FLOW:
1790 	case UDP_V4_FLOW:
1791 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1792 	/* fall through to add IP fields */
1793 	case SCTP_V4_FLOW:
1794 	case AH_ESP_V4_FLOW:
1795 	case AH_V4_FLOW:
1796 	case ESP_V4_FLOW:
1797 	case IPV4_FLOW:
1798 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1799 		break;
1800 	case TCP_V6_FLOW:
1801 	case UDP_V6_FLOW:
1802 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1803 	/* fall through to add IP fields */
1804 	case SCTP_V6_FLOW:
1805 	case AH_ESP_V6_FLOW:
1806 	case AH_V6_FLOW:
1807 	case ESP_V6_FLOW:
1808 	case IPV6_FLOW:
1809 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1810 		break;
1811 	default:
1812 		return -EINVAL;
1813 	}
1814 
1815 	return 0;
1816 }
1817 
1818 /**
1819  * i40e_get_ethtool_fdir_all - Populates the rule count of a command
1820  * @pf: Pointer to the physical function struct
1821  * @cmd: The command to get or set Rx flow classification rules
1822  * @rule_locs: Array of used rule locations
1823  *
1824  * This function populates both the total and actual rule count of
1825  * the ethtool flow classification command
1826  *
1827  * Returns 0 on success or -EMSGSIZE if the rule_locs array is too small
1828  **/
1829 static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
1830 				     struct ethtool_rxnfc *cmd,
1831 				     u32 *rule_locs)
1832 {
1833 	struct i40e_fdir_filter *rule;
1834 	struct hlist_node *node2;
1835 	int cnt = 0;
1836 
1837 	/* report total rule count */
1838 	cmd->data = i40e_get_fd_cnt_all(pf);
1839 
1840 	hlist_for_each_entry_safe(rule, node2,
1841 				  &pf->fdir_filter_list, fdir_node) {
1842 		if (cnt == cmd->rule_cnt)
1843 			return -EMSGSIZE;
1844 
1845 		rule_locs[cnt] = rule->fd_id;
1846 		cnt++;
1847 	}
1848 
1849 	cmd->rule_cnt = cnt;
1850 
1851 	return 0;
1852 }
1853 
1854 /**
1855  * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
1856  * @pf: Pointer to the physical function struct
1857  * @cmd: The command to get or set Rx flow classification rules
1858  *
1859  * This function looks up a filter based on the Rx flow classification
1860  * command and fills the flow spec info for it if found
1861  *
1862  * Returns 0 on success or -EINVAL if filter not found
1863  **/
1864 static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1865 				       struct ethtool_rxnfc *cmd)
1866 {
1867 	struct ethtool_rx_flow_spec *fsp =
1868 			(struct ethtool_rx_flow_spec *)&cmd->fs;
1869 	struct i40e_fdir_filter *rule = NULL;
1870 	struct hlist_node *node2;
1871 
1872 	hlist_for_each_entry_safe(rule, node2,
1873 				  &pf->fdir_filter_list, fdir_node) {
1874 		if (fsp->location <= rule->fd_id)
1875 			break;
1876 	}
1877 
1878 	if (!rule || fsp->location != rule->fd_id)
1879 		return -EINVAL;
1880 
1881 	fsp->flow_type = rule->flow_type;
1882 	if (fsp->flow_type == IP_USER_FLOW) {
1883 		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
1884 		fsp->h_u.usr_ip4_spec.proto = 0;
1885 		fsp->m_u.usr_ip4_spec.proto = 0;
1886 	}
1887 
1888 	/* Reverse the src and dest notion, since the HW views them from
1889 	 * Tx perspective, whereas the user expects it from the Rx filter view.
1890 	 */
1891 	fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
1892 	fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
1893 	fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
1894 	fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
1895 
1896 	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
1897 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
1898 	else
1899 		fsp->ring_cookie = rule->q_index;
1900 
1901 	return 0;
1902 }
1903 
1904 /**
1905  * i40e_get_rxnfc - command to get RX flow classification rules
1906  * @netdev: network interface device structure
1907  * @cmd: ethtool rxnfc command
 * @rule_locs: buffer in which to record the locations of configured rules
1908  *
1909  * Returns Success if the command is supported.
1910  **/
1911 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1912 			  u32 *rule_locs)
1913 {
1914 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1915 	struct i40e_vsi *vsi = np->vsi;
1916 	struct i40e_pf *pf = vsi->back;
1917 	int ret = -EOPNOTSUPP;
1918 
1919 	switch (cmd->cmd) {
1920 	case ETHTOOL_GRXRINGS:
1921 		cmd->data = vsi->alloc_queue_pairs;
1922 		ret = 0;
1923 		break;
1924 	case ETHTOOL_GRXFH:
1925 		ret = i40e_get_rss_hash_opts(pf, cmd);
1926 		break;
1927 	case ETHTOOL_GRXCLSRLCNT:
1928 		cmd->rule_cnt = pf->fdir_pf_active_filters;
1929 		/* report total rule count */
1930 		cmd->data = i40e_get_fd_cnt_all(pf);
1931 		ret = 0;
1932 		break;
1933 	case ETHTOOL_GRXCLSRULE:
1934 		ret = i40e_get_ethtool_fdir_entry(pf, cmd);
1935 		break;
1936 	case ETHTOOL_GRXCLSRLALL:
1937 		ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
1938 		break;
1939 	default:
1940 		break;
1941 	}
1942 
1943 	return ret;
1944 }
1945 
1946 /**
1947  * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
1948  * @pf: pointer to the physical function struct
1949  * @nfc: ethtool rxnfc command
1950  *
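 * Typically driven by a command such as "ethtool -N <dev> rx-flow-hash
 * tcp4 sdfn", which requests hashing on the IPv4 source/destination
 * addresses and the TCP source/destination ports.
 *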
1951  * Returns Success if the flow input set is supported.
1952  **/
1953 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1954 {
1955 	struct i40e_hw *hw = &pf->hw;
1956 	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
1957 		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
1958 
1959 	/* RSS does not support anything other than hashing
1960 	 * to queues on src and dst IPs and ports
1961 	 */
1962 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
1963 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
1964 		return -EINVAL;
1965 
1966 	/* We need at least the IP SRC and DEST fields for hashing */
1967 	if (!(nfc->data & RXH_IP_SRC) ||
1968 	    !(nfc->data & RXH_IP_DST))
1969 		return -EINVAL;
1970 
1971 	switch (nfc->flow_type) {
1972 	case TCP_V4_FLOW:
1973 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1974 		case 0:
1975 			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1976 			break;
1977 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1978 			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1979 			break;
1980 		default:
1981 			return -EINVAL;
1982 		}
1983 		break;
1984 	case TCP_V6_FLOW:
1985 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1986 		case 0:
1987 			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1988 			break;
1989 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1990 			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1991 			break;
1992 		default:
1993 			return -EINVAL;
1994 		}
1995 		break;
1996 	case UDP_V4_FLOW:
1997 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1998 		case 0:
1999 			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2000 				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
2001 			break;
2002 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2003 			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2004 				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
2005 			break;
2006 		default:
2007 			return -EINVAL;
2008 		}
2009 		break;
2010 	case UDP_V6_FLOW:
2011 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2012 		case 0:
2013 			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2014 				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
2015 			break;
2016 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2017 			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2018 				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
2019 			break;
2020 		default:
2021 			return -EINVAL;
2022 		}
2023 		break;
2024 	case AH_ESP_V4_FLOW:
2025 	case AH_V4_FLOW:
2026 	case ESP_V4_FLOW:
2027 	case SCTP_V4_FLOW:
2028 		if ((nfc->data & RXH_L4_B_0_1) ||
2029 		    (nfc->data & RXH_L4_B_2_3))
2030 			return -EINVAL;
2031 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2032 		break;
2033 	case AH_ESP_V6_FLOW:
2034 	case AH_V6_FLOW:
2035 	case ESP_V6_FLOW:
2036 	case SCTP_V6_FLOW:
2037 		if ((nfc->data & RXH_L4_B_0_1) ||
2038 		    (nfc->data & RXH_L4_B_2_3))
2039 			return -EINVAL;
2040 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2041 		break;
2042 	case IPV4_FLOW:
2043 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2044 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
2045 		break;
2046 	case IPV6_FLOW:
2047 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2048 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2049 		break;
2050 	default:
2051 		return -EINVAL;
2052 	}
2053 
2054 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
2055 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
2056 	i40e_flush(hw);
2057 
2058 	/* Save setting for future output/update */
2059 	pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
2060 
2061 	return 0;
2062 }
2063 
2064 /**
2065  * i40e_match_fdir_input_set - Match a new filter against an existing one
2066  * @rule: The filter already added
2067  * @input: The new filter to compare against
2068  *
2069  * Returns true if the two input sets match
2070  **/
2071 static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
2072 				      struct i40e_fdir_filter *input)
2073 {
2074 	if ((rule->dst_ip[0] != input->dst_ip[0]) ||
2075 	    (rule->src_ip[0] != input->src_ip[0]) ||
2076 	    (rule->dst_port != input->dst_port) ||
2077 	    (rule->src_port != input->src_port))
2078 		return false;
2079 	return true;
2080 }
2081 
2082 /**
2083  * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
2084  * @vsi: Pointer to the targeted VSI
2085  * @input: The filter to update or NULL to indicate deletion
2086  * @sw_idx: Software index to the filter
2087  * @cmd: The command to get or set Rx flow classification rules
2088  *
2089  * This function updates (or deletes) a Flow Director entry from
2090  * the hlist of the corresponding PF
2091  *
2092  * Returns 0 on success
2093  **/
2094 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
2095 					  struct i40e_fdir_filter *input,
2096 					  u16 sw_idx,
2097 					  struct ethtool_rxnfc *cmd)
2098 {
2099 	struct i40e_fdir_filter *rule, *parent;
2100 	struct i40e_pf *pf = vsi->back;
2101 	struct hlist_node *node2;
2102 	int err = -EINVAL;
2103 
2104 	parent = NULL;
2105 	rule = NULL;
2106 
2107 	hlist_for_each_entry_safe(rule, node2,
2108 				  &pf->fdir_filter_list, fdir_node) {
2109 		/* hash found, or no matching entry */
2110 		if (rule->fd_id >= sw_idx)
2111 			break;
2112 		parent = rule;
2113 	}
2114 
2115 	/* if there is an old rule occupying our place remove it */
2116 	/* if there is an old rule occupying our place, remove it */
2117 		if (input && !i40e_match_fdir_input_set(rule, input))
2118 			err = i40e_add_del_fdir(vsi, rule, false);
2119 		else if (!input)
2120 			err = i40e_add_del_fdir(vsi, rule, false);
2121 		hlist_del(&rule->fdir_node);
2122 		kfree(rule);
2123 		pf->fdir_pf_active_filters--;
2124 	}
2125 
2126 	/* If there is no input this was a delete; err is 0 if a rule was
2127 	 * successfully found and removed from the list, else -EINVAL
2128 	 */
2129 	if (!input)
2130 		return err;
2131 
2132 	/* initialize node and set software index */
2133 	INIT_HLIST_NODE(&input->fdir_node);
2134 
2135 	/* add filter to the list */
2136 	if (parent)
2137 		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2138 	else
2139 		hlist_add_head(&input->fdir_node,
2140 			       &pf->fdir_filter_list);
2141 
2142 	/* update counts */
2143 	pf->fdir_pf_active_filters++;
2144 
2145 	return 0;
2146 }
2147 
2148 /**
2149  * i40e_del_fdir_entry - Deletes a Flow Director filter entry
2150  * @vsi: Pointer to the targeted VSI
2151  * @cmd: The command to get or set Rx flow classification rules
2152  *
2153  * The function removes a Flow Director filter entry from the
2154  * hlist of the corresponding PF
2155  *
2156  * Returns 0 on success
2157  **/
2158 static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
2159 			       struct ethtool_rxnfc *cmd)
2160 {
2161 	struct ethtool_rx_flow_spec *fsp =
2162 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2163 	struct i40e_pf *pf = vsi->back;
2164 	int ret = 0;
2165 
2166 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
2167 	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
2168 		return -EBUSY;
2169 
2170 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
2171 		return -EBUSY;
2172 
2173 	ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
2174 
2175 	i40e_fdir_check_and_reenable(pf);
2176 	return ret;
2177 }
2178 
2179 /**
2180  * i40e_add_fdir_ethtool - Add Flow Director filters for a specific flow spec
2181  * @vsi: pointer to the targeted VSI
2182  * @cmd: command to get or set RX flow classification rules
2183  *
2184  * Add Flow Director filters for a specific flow spec based on their
2185  * protocol.  Returns 0 if the filters were successfully added.
2186  **/
2187 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
2188 				 struct ethtool_rxnfc *cmd)
2189 {
2190 	struct ethtool_rx_flow_spec *fsp;
2191 	struct i40e_fdir_filter *input;
2192 	struct i40e_pf *pf;
2193 	int ret = -EINVAL;
2194 
2195 	if (!vsi)
2196 		return -EINVAL;
2197 
2198 	pf = vsi->back;
2199 
2200 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2201 		return -EOPNOTSUPP;
2202 
2203 	if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
2204 		return -ENOSPC;
2205 
2206 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
2207 	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
2208 		return -EBUSY;
2209 
2210 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
2211 		return -EBUSY;
2212 
2213 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
2214 
2215 	if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
2216 			      pf->hw.func_caps.fd_filters_guaranteed)) {
2217 		return -EINVAL;
2218 	}
2219 
2220 	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
2221 	    (fsp->ring_cookie >= vsi->num_queue_pairs))
2222 		return -EINVAL;
2223 
2224 	input = kzalloc(sizeof(*input), GFP_KERNEL);
2225 
2226 	if (!input)
2227 		return -ENOMEM;
2228 
2229 	input->fd_id = fsp->location;
2230 
2231 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2232 		input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
2233 	else
2234 		input->dest_ctl =
2235 			     I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
2236 
2237 	input->q_index = fsp->ring_cookie;
2238 	input->flex_off = 0;
2239 	input->pctype = 0;
2240 	input->dest_vsi = vsi->id;
2241 	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
2242 	input->cnt_index  = pf->fd_sb_cnt_idx;
2243 	input->flow_type = fsp->flow_type;
2244 	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
2245 
2246 	/* Reverse the src and dest notion, since the HW expects them to be from
2247 	 * Tx perspective, whereas the input from the user is from the Rx filter view.
2248 	 */
2249 	input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
2250 	input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
2251 	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2252 	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2253 
2254 	ret = i40e_add_del_fdir(vsi, input, true);
2255 	if (ret)
2256 		kfree(input);
2257 	else
2258 		i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
2259 
2260 	return ret;
2261 }
2262 
2263 /**
2264  * i40e_set_rxnfc - command to set RX flow classification rules
2265  * @netdev: network interface device structure
2266  * @cmd: ethtool rxnfc command
2267  *
2268  * Returns Success if the command is supported.
2269  **/
2270 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
2271 {
2272 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2273 	struct i40e_vsi *vsi = np->vsi;
2274 	struct i40e_pf *pf = vsi->back;
2275 	int ret = -EOPNOTSUPP;
2276 
2277 	switch (cmd->cmd) {
2278 	case ETHTOOL_SRXFH:
2279 		ret = i40e_set_rss_hash_opt(pf, cmd);
2280 		break;
2281 	case ETHTOOL_SRXCLSRLINS:
2282 		ret = i40e_add_fdir_ethtool(vsi, cmd);
2283 		break;
2284 	case ETHTOOL_SRXCLSRLDEL:
2285 		ret = i40e_del_fdir_entry(vsi, cmd);
2286 		break;
2287 	default:
2288 		break;
2289 	}
2290 
2291 	return ret;
2292 }
2293 
2294 /**
2295  * i40e_max_channels - get Max number of combined channels supported
2296  * @vsi: vsi pointer
2297  **/
2298 static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
2299 {
2300 	/* TODO: This code assumes DCB and FD are disabled for now. */
2301 	return vsi->alloc_queue_pairs;
2302 }
2303 
2304 /**
2305  * i40e_get_channels - Report the current and maximum supported channel counts
2306  * @netdev: network interface device structure
2307  * @ch: ethtool channels structure
2308  *
2309  * We don't support separate tx and rx queues as channels. The other count
2310  * represents how many queues are being used for control. max_combined counts
2311  * how many queue pairs we can support. They may not be mapped 1 to 1 with
2312  * q_vectors since we support a lot more queue pairs than q_vectors.
2313  **/
2314 static void i40e_get_channels(struct net_device *dev,
2315 			       struct ethtool_channels *ch)
2316 {
2317 	struct i40e_netdev_priv *np = netdev_priv(dev);
2318 	struct i40e_vsi *vsi = np->vsi;
2319 	struct i40e_pf *pf = vsi->back;
2320 
2321 	/* report maximum channels */
2322 	ch->max_combined = i40e_max_channels(vsi);
2323 
2324 	/* report info for other vector */
2325 	ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
2326 	ch->max_other = ch->other_count;
2327 
2328 	/* Note: This code assumes DCB is disabled for now. */
2329 	ch->combined_count = vsi->num_queue_pairs;
2330 }
2331 
2332 /**
2333  * i40e_set_channels - Set the new channels count.
2334  * @netdev: network interface device structure
2335  * @ch: ethtool channels structure
2336  *
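 * Configured via e.g. "ethtool -L <dev> combined <n>"; only the combined
 * count can be changed, and only on the main VSI.
 *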
2337  * The new channels count may not be the same as requested by the user
2338  * since it gets rounded down to a power of 2 value.
2339  **/
2340 static int i40e_set_channels(struct net_device *dev,
2341 			      struct ethtool_channels *ch)
2342 {
2343 	struct i40e_netdev_priv *np = netdev_priv(dev);
2344 	unsigned int count = ch->combined_count;
2345 	struct i40e_vsi *vsi = np->vsi;
2346 	struct i40e_pf *pf = vsi->back;
2347 	int new_count;
2348 
2349 	/* We do not support setting channels for any other VSI at present */
2350 	if (vsi->type != I40E_VSI_MAIN)
2351 		return -EINVAL;
2352 
2353 	/* verify they are not requesting separate vectors */
2354 	if (!count || ch->rx_count || ch->tx_count)
2355 		return -EINVAL;
2356 
2357 	/* verify other_count has not changed */
2358 	if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
2359 		return -EINVAL;
2360 
2361 	/* verify the number of channels does not exceed hardware limits */
2362 	if (count > i40e_max_channels(vsi))
2363 		return -EINVAL;
2364 
2365 	/* update feature limits from largest to smallest supported values */
2366 	/* TODO: Flow director limit, DCB etc */
2367 
2368 	/* use rss_reconfig to rebuild with new queue count and update traffic
2369 	 * class queue mapping
2370 	 */
2371 	new_count = i40e_reconfig_rss_queues(pf, count);
2372 	if (new_count > 0)
2373 		return 0;
2374 	else
2375 		return -EINVAL;
2376 }
2377 
2378 #define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
2379 /**
2380  * i40e_get_rxfh_key_size - get the RSS hash key size
2381  * @netdev: network interface device structure
2382  *
2383  * Returns the hash key size.
2384  **/
2385 static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
2386 {
2387 	return I40E_HKEY_ARRAY_SIZE;
2388 }
2389 
2390 /**
2391  * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
2392  * @netdev: network interface device structure
2393  *
2394  * Returns the table size.
2395  **/
2396 static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
2397 {
2398 	return I40E_HLUT_ARRAY_SIZE;
2399 }
2400 
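/**
 * i40e_get_rxfh - get the Rx flow hash indirection table and key
 * @netdev: network interface device structure
 * @indir: buffer for the indirection table (may be NULL)
 * @key: buffer for the hash key (may be NULL)
 * @hfunc: buffer for the hash function in use (may be NULL)
 *
 * Reads the RSS lookup table and hash key back from the PF registers.
 * The Toeplitz hash (ETH_RSS_HASH_TOP) is the only function reported.
 **/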
2401 static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2402 			 u8 *hfunc)
2403 {
2404 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2405 	struct i40e_vsi *vsi = np->vsi;
2406 	struct i40e_pf *pf = vsi->back;
2407 	struct i40e_hw *hw = &pf->hw;
2408 	u32 reg_val;
2409 	int i, j;
2410 
2411 	if (hfunc)
2412 		*hfunc = ETH_RSS_HASH_TOP;
2413 
2414 	if (!indir)
2415 		return 0;
2416 
2417 	for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
2418 		reg_val = rd32(hw, I40E_PFQF_HLUT(i));
2419 		indir[j++] = reg_val & 0xff;
2420 		indir[j++] = (reg_val >> 8) & 0xff;
2421 		indir[j++] = (reg_val >> 16) & 0xff;
2422 		indir[j++] = (reg_val >> 24) & 0xff;
2423 	}
2424 
2425 	if (key) {
2426 		for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
2427 			reg_val = rd32(hw, I40E_PFQF_HKEY(i));
2428 			key[j++] = (u8)(reg_val & 0xff);
2429 			key[j++] = (u8)((reg_val >> 8) & 0xff);
2430 			key[j++] = (u8)((reg_val >> 16) & 0xff);
2431 			key[j++] = (u8)((reg_val >> 24) & 0xff);
2432 		}
2433 	}
2434 	return 0;
2435 }
2436 
2437 /**
2438  * i40e_set_rxfh - set the rx flow hash indirection table
2439  * @netdev: network interface device structure
2440  * @indir: indirection table
2441  * @key: hash key
 * @hfunc: hash function to use (only Toeplitz or no-change is accepted)
2442  *
2443  * Returns -EINVAL if the table specifies an invalid queue id, otherwise
2444  * returns 0 after programming the table.
2445  **/
2446 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
2447 			 const u8 *key, const u8 hfunc)
2448 {
2449 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2450 	struct i40e_vsi *vsi = np->vsi;
2451 	struct i40e_pf *pf = vsi->back;
2452 	struct i40e_hw *hw = &pf->hw;
2453 	u32 reg_val;
2454 	int i, j;
2455 
2456 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
2457 		return -EOPNOTSUPP;
2458 
2459 	if (!indir)
2460 		return 0;
2461 
2462 	for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
2463 		reg_val = indir[j++];
2464 		reg_val |= indir[j++] << 8;
2465 		reg_val |= indir[j++] << 16;
2466 		reg_val |= indir[j++] << 24;
2467 		wr32(hw, I40E_PFQF_HLUT(i), reg_val);
2468 	}
2469 
2470 	if (key) {
2471 		for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
2472 			reg_val = key[j++];
2473 			reg_val |= key[j++] << 8;
2474 			reg_val |= key[j++] << 16;
2475 			reg_val |= key[j++] << 24;
2476 			wr32(hw, I40E_PFQF_HKEY(i), reg_val);
2477 		}
2478 	}
2479 	return 0;
2480 }
2481 
2482 /**
2483  * i40e_get_priv_flags - report device private flags
2484  * @dev: network interface device structure
2485  *
2486  * The string set count reported by get_sset_count and the strings themselves
2487  * must match the flags returned here.  Add a new string to the
2488  * i40e_priv_flags_strings array for each new flag.
2489  *
2490  * Returns a u32 bitmap of flags.
2491  **/
2492 static u32 i40e_get_priv_flags(struct net_device *dev)
2493 {
2494 	struct i40e_netdev_priv *np = netdev_priv(dev);
2495 	struct i40e_vsi *vsi = np->vsi;
2496 	struct i40e_pf *pf = vsi->back;
2497 	u32 ret_flags = 0;
2498 
2499 	ret_flags |= pf->hw.func_caps.npar_enable ?
2500 		I40E_PRIV_FLAGS_NPAR_FLAG : 0;
2501 
2502 	return ret_flags;
2503 }
2504 
2505 static const struct ethtool_ops i40e_ethtool_ops = {
2506 	.get_settings		= i40e_get_settings,
2507 	.set_settings		= i40e_set_settings,
2508 	.get_drvinfo		= i40e_get_drvinfo,
2509 	.get_regs_len		= i40e_get_regs_len,
2510 	.get_regs		= i40e_get_regs,
2511 	.nway_reset		= i40e_nway_reset,
2512 	.get_link		= ethtool_op_get_link,
2513 	.get_wol		= i40e_get_wol,
2514 	.set_wol		= i40e_set_wol,
2515 	.set_eeprom		= i40e_set_eeprom,
2516 	.get_eeprom_len		= i40e_get_eeprom_len,
2517 	.get_eeprom		= i40e_get_eeprom,
2518 	.get_ringparam		= i40e_get_ringparam,
2519 	.set_ringparam		= i40e_set_ringparam,
2520 	.get_pauseparam		= i40e_get_pauseparam,
2521 	.set_pauseparam		= i40e_set_pauseparam,
2522 	.get_msglevel		= i40e_get_msglevel,
2523 	.set_msglevel		= i40e_set_msglevel,
2524 	.get_rxnfc		= i40e_get_rxnfc,
2525 	.set_rxnfc		= i40e_set_rxnfc,
2526 	.self_test		= i40e_diag_test,
2527 	.get_strings		= i40e_get_strings,
2528 	.set_phys_id		= i40e_set_phys_id,
2529 	.get_sset_count		= i40e_get_sset_count,
2530 	.get_ethtool_stats	= i40e_get_ethtool_stats,
2531 	.get_coalesce		= i40e_get_coalesce,
2532 	.set_coalesce		= i40e_set_coalesce,
2533 	.get_rxfh_key_size	= i40e_get_rxfh_key_size,
2534 	.get_rxfh_indir_size	= i40e_get_rxfh_indir_size,
2535 	.get_rxfh		= i40e_get_rxfh,
2536 	.set_rxfh		= i40e_set_rxfh,
2537 	.get_channels		= i40e_get_channels,
2538 	.set_channels		= i40e_set_channels,
2539 	.get_ts_info		= i40e_get_ts_info,
2540 	.get_priv_flags		= i40e_get_priv_flags,
2541 };
2542 
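/**
 * i40e_set_ethtool_ops - attach the i40e ethtool operations to a netdev
 * @netdev: network interface device structure
 **/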
2543 void i40e_set_ethtool_ops(struct net_device *netdev)
2544 {
2545 	netdev->ethtool_ops = &i40e_ethtool_ops;
2546 }
2547