1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 /* ethtool support for i40e */
28 
29 #include "i40e.h"
30 #include "i40e_diag.h"
31 
32 struct i40e_stats {
33 	char stat_string[ETH_GSTRING_LEN];
34 	int sizeof_stat;
35 	int stat_offset;
36 };
37 
38 #define I40E_STAT(_type, _name, _stat) { \
39 	.stat_string = _name, \
40 	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
41 	.stat_offset = offsetof(_type, _stat) \
42 }
43 
44 #define I40E_NETDEV_STAT(_net_stat) \
45 		I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
46 #define I40E_PF_STAT(_name, _stat) \
47 		I40E_STAT(struct i40e_pf, _name, _stat)
48 #define I40E_VSI_STAT(_name, _stat) \
49 		I40E_STAT(struct i40e_vsi, _name, _stat)
50 #define I40E_VEB_STAT(_name, _stat) \
51 		I40E_STAT(struct i40e_veb, _name, _stat)
52 
53 static const struct i40e_stats i40e_gstrings_net_stats[] = {
54 	I40E_NETDEV_STAT(rx_packets),
55 	I40E_NETDEV_STAT(tx_packets),
56 	I40E_NETDEV_STAT(rx_bytes),
57 	I40E_NETDEV_STAT(tx_bytes),
58 	I40E_NETDEV_STAT(rx_errors),
59 	I40E_NETDEV_STAT(tx_errors),
60 	I40E_NETDEV_STAT(rx_dropped),
61 	I40E_NETDEV_STAT(tx_dropped),
62 	I40E_NETDEV_STAT(collisions),
63 	I40E_NETDEV_STAT(rx_length_errors),
64 	I40E_NETDEV_STAT(rx_crc_errors),
65 };
66 
67 static const struct i40e_stats i40e_gstrings_veb_stats[] = {
68 	I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
69 	I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
70 	I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
71 	I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
72 	I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
73 	I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
74 	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
75 	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
76 	I40E_VEB_STAT("rx_discards", stats.rx_discards),
77 	I40E_VEB_STAT("tx_discards", stats.tx_discards),
78 	I40E_VEB_STAT("tx_errors", stats.tx_errors),
79 	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
80 };
81 
82 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
83 	I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
84 	I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
85 	I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
86 	I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
87 	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
88 	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
89 	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
90 };
91 
92 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
93 				 struct ethtool_rxnfc *cmd);
94 
95 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
96  * but they are separate.  This device supports Virtualization, and
97  * as such might have several netdevs supporting VMDq and FCoE going
98  * through a single port.  The NETDEV_STATs are for individual netdevs
99  * seen at the top of the stack, and the PF_STATs are for the physical
100  * function at the bottom of the stack hosting those netdevs.
101  *
102  * The PF_STATs are appended to the netdev stats only when ethtool -S
103  * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
104  */
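/* For illustration only (a sketch of the macro expansion above, not extra
 * driver logic): I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes) expands to
 *	{ .stat_string = "rx_bytes",
 *	  .sizeof_stat = FIELD_SIZEOF(struct i40e_pf, stats.eth.rx_bytes),
 *	  .stat_offset = offsetof(struct i40e_pf, stats.eth.rx_bytes) }
 * which lets i40e_get_ethtool_stats() read each counter by offset from the
 * PF structure instead of naming every field explicitly.
 */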
105 static struct i40e_stats i40e_gstrings_stats[] = {
106 	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
107 	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
108 	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
109 	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
110 	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
111 	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
112 	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
113 	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
114 	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
115 	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
116 	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
117 	I40E_PF_STAT("crc_errors", stats.crc_errors),
118 	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
119 	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
120 	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
121 	I40E_PF_STAT("tx_timeout", tx_timeout_count),
122 	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
123 	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
124 	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
125 	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
126 	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
127 	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
128 	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
129 	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
130 	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
131 	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
132 	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
133 	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
134 	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
135 	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
136 	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
137 	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
138 	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
139 	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
140 	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
141 	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
142 	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
143 	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
144 	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
145 	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
146 	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
147 	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
148 	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
149 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
150 	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
151 
152 	/* LPI stats */
153 	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
154 	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
155 	I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
156 	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
157 };
158 
159 #ifdef I40E_FCOE
160 static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
161 	I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
162 	I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
163 	I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
164 	I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
165 	I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
166 	I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
167 	I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
168 	I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
169 };
170 
171 #endif /* I40E_FCOE */
172 #define I40E_QUEUE_STATS_LEN(n) \
173 	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
174 	    * 2 /* Tx and Rx together */                                     \
175 	    * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
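/* Each ring exports sizeof(struct i40e_queue_stats) / sizeof(u64) counters
 * (packets and bytes, as copied out in i40e_get_ethtool_stats()), and every
 * queue pair has both a Tx and an Rx ring, hence the factor of 2 above.
 */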
176 #define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
177 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
178 #define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
179 #ifdef I40E_FCOE
180 #define I40E_FCOE_STATS_LEN	ARRAY_SIZE(i40e_gstrings_fcoe_stats)
181 #define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
182 				 I40E_FCOE_STATS_LEN + \
183 				 I40E_MISC_STATS_LEN + \
184 				 I40E_QUEUE_STATS_LEN((n)))
185 #else
186 #define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
187 				 I40E_MISC_STATS_LEN + \
188 				 I40E_QUEUE_STATS_LEN((n)))
189 #endif /* I40E_FCOE */
190 #define I40E_PFC_STATS_LEN ( \
191 		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
192 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
193 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
194 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
195 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
196 		 / sizeof(u64))
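/* I40E_PFC_STATS_LEN counts the per-priority XON/XOFF counters appended at
 * the end of i40e_get_ethtool_stats(): the combined byte size of the five
 * priority_* arrays divided by sizeof(u64) gives the number of u64 entries.
 */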
197 #define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
198 #define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
199 				 I40E_PFC_STATS_LEN + \
200 				 I40E_VSI_STATS_LEN((n)))
201 
202 enum i40e_ethtool_test_id {
203 	I40E_ETH_TEST_REG = 0,
204 	I40E_ETH_TEST_EEPROM,
205 	I40E_ETH_TEST_INTR,
206 	I40E_ETH_TEST_LOOPBACK,
207 	I40E_ETH_TEST_LINK,
208 };
209 
210 static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
211 	"Register test  (offline)",
212 	"Eeprom test    (offline)",
213 	"Interrupt test (offline)",
214 	"Loopback test  (offline)",
215 	"Link test   (on/offline)"
216 };
217 
218 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
219 
220 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
221 	"NPAR",
222 };
223 
224 #define I40E_PRIV_FLAGS_STR_LEN \
225 	(sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
226 
227 /**
228  * i40e_partition_setting_complaint - generic complaint for MFP restriction
229  * @pf: the PF struct
230  **/
231 static void i40e_partition_setting_complaint(struct i40e_pf *pf)
232 {
233 	dev_info(&pf->pdev->dev,
234 		 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
235 }
236 
237 /**
238  * i40e_get_settings_link_up - Get the Link settings for when link is up
239  * @hw: hw structure
240  * @ecmd: ethtool command to fill in
241  * @netdev: network interface device structure
242  *
243  **/
244 static void i40e_get_settings_link_up(struct i40e_hw *hw,
245 				      struct ethtool_cmd *ecmd,
246 				      struct net_device *netdev)
247 {
248 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
249 	u32 link_speed = hw_link_info->link_speed;
250 
251 	/* Initialize supported and advertised settings based on phy settings */
252 	switch (hw_link_info->phy_type) {
253 	case I40E_PHY_TYPE_40GBASE_CR4:
254 	case I40E_PHY_TYPE_40GBASE_CR4_CU:
255 		ecmd->supported = SUPPORTED_Autoneg |
256 				  SUPPORTED_40000baseCR4_Full;
257 		ecmd->advertising = ADVERTISED_Autoneg |
258 				    ADVERTISED_40000baseCR4_Full;
259 		break;
260 	case I40E_PHY_TYPE_XLAUI:
261 	case I40E_PHY_TYPE_XLPPI:
262 	case I40E_PHY_TYPE_40GBASE_AOC:
263 		ecmd->supported = SUPPORTED_40000baseCR4_Full;
264 		break;
265 	case I40E_PHY_TYPE_40GBASE_KR4:
266 		ecmd->supported = SUPPORTED_Autoneg |
267 				  SUPPORTED_40000baseKR4_Full;
268 		ecmd->advertising = ADVERTISED_Autoneg |
269 				    ADVERTISED_40000baseKR4_Full;
270 		break;
271 	case I40E_PHY_TYPE_40GBASE_SR4:
272 		ecmd->supported = SUPPORTED_40000baseSR4_Full;
273 		break;
274 	case I40E_PHY_TYPE_40GBASE_LR4:
275 		ecmd->supported = SUPPORTED_40000baseLR4_Full;
276 		break;
277 	case I40E_PHY_TYPE_20GBASE_KR2:
278 		ecmd->supported = SUPPORTED_Autoneg |
279 				  SUPPORTED_20000baseKR2_Full;
280 		ecmd->advertising = ADVERTISED_Autoneg |
281 				    ADVERTISED_20000baseKR2_Full;
282 		break;
283 	case I40E_PHY_TYPE_10GBASE_KX4:
284 		ecmd->supported = SUPPORTED_Autoneg |
285 				  SUPPORTED_10000baseKX4_Full;
286 		ecmd->advertising = ADVERTISED_Autoneg |
287 				    ADVERTISED_10000baseKX4_Full;
288 		break;
289 	case I40E_PHY_TYPE_10GBASE_KR:
290 		ecmd->supported = SUPPORTED_Autoneg |
291 				  SUPPORTED_10000baseKR_Full;
292 		ecmd->advertising = ADVERTISED_Autoneg |
293 				    ADVERTISED_10000baseKR_Full;
294 		break;
295 	case I40E_PHY_TYPE_10GBASE_SR:
296 	case I40E_PHY_TYPE_10GBASE_LR:
297 	case I40E_PHY_TYPE_1000BASE_SX:
298 	case I40E_PHY_TYPE_1000BASE_LX:
299 		ecmd->supported = SUPPORTED_10000baseT_Full |
300 				  SUPPORTED_1000baseT_Full;
301 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
302 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
303 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
304 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
305 		break;
306 	case I40E_PHY_TYPE_1000BASE_KX:
307 		ecmd->supported = SUPPORTED_Autoneg |
308 				  SUPPORTED_1000baseKX_Full;
309 		ecmd->advertising = ADVERTISED_Autoneg |
310 				    ADVERTISED_1000baseKX_Full;
311 		break;
312 	case I40E_PHY_TYPE_10GBASE_T:
313 	case I40E_PHY_TYPE_1000BASE_T:
314 	case I40E_PHY_TYPE_100BASE_TX:
315 		ecmd->supported = SUPPORTED_Autoneg |
316 				  SUPPORTED_10000baseT_Full |
317 				  SUPPORTED_1000baseT_Full |
318 				  SUPPORTED_100baseT_Full;
319 		ecmd->advertising = ADVERTISED_Autoneg;
320 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
321 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
322 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
323 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
324 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
325 			ecmd->advertising |= ADVERTISED_100baseT_Full;
326 		break;
327 	case I40E_PHY_TYPE_10GBASE_CR1_CU:
328 	case I40E_PHY_TYPE_10GBASE_CR1:
329 		ecmd->supported = SUPPORTED_Autoneg |
330 				  SUPPORTED_10000baseT_Full;
331 		ecmd->advertising = ADVERTISED_Autoneg |
332 				    ADVERTISED_10000baseT_Full;
333 		break;
334 	case I40E_PHY_TYPE_XAUI:
335 	case I40E_PHY_TYPE_XFI:
336 	case I40E_PHY_TYPE_SFI:
337 	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
338 	case I40E_PHY_TYPE_10GBASE_AOC:
339 		ecmd->supported = SUPPORTED_10000baseT_Full;
340 		break;
341 	case I40E_PHY_TYPE_SGMII:
342 		ecmd->supported = SUPPORTED_Autoneg |
343 				  SUPPORTED_1000baseT_Full |
344 				  SUPPORTED_100baseT_Full;
345 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
346 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
347 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
348 			ecmd->advertising |= ADVERTISED_100baseT_Full;
349 		break;
350 	default:
351 		/* if we got here and link is up something bad is afoot */
352 		netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
353 			    hw_link_info->phy_type);
354 	}
355 
356 	/* Set speed and duplex */
357 	switch (link_speed) {
358 	case I40E_LINK_SPEED_40GB:
359 		ethtool_cmd_speed_set(ecmd, SPEED_40000);
360 		break;
361 	case I40E_LINK_SPEED_20GB:
362 		ethtool_cmd_speed_set(ecmd, SPEED_20000);
363 		break;
364 	case I40E_LINK_SPEED_10GB:
365 		ethtool_cmd_speed_set(ecmd, SPEED_10000);
366 		break;
367 	case I40E_LINK_SPEED_1GB:
368 		ethtool_cmd_speed_set(ecmd, SPEED_1000);
369 		break;
370 	case I40E_LINK_SPEED_100MB:
371 		ethtool_cmd_speed_set(ecmd, SPEED_100);
372 		break;
373 	default:
374 		break;
375 	}
376 	ecmd->duplex = DUPLEX_FULL;
377 }
378 
379 /**
380  * i40e_get_settings_link_down - Get the Link settings for when link is down
381  * @hw: hw structure
382  * @ecmd: ethtool command to fill in
383  *
384  * Reports link settings that can be determined when link is down
385  **/
386 static void i40e_get_settings_link_down(struct i40e_hw *hw,
387 					struct ethtool_cmd *ecmd)
388 {
389 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
390 
391 	/* link is down and the driver needs to fall back on
392 	 * device ID to determine what kinds of info to display,
393 	 * it's mostly a guess that may change when link is up
394 	 */
395 	switch (hw->device_id) {
396 	case I40E_DEV_ID_QSFP_A:
397 	case I40E_DEV_ID_QSFP_B:
398 	case I40E_DEV_ID_QSFP_C:
399 		/* pluggable QSFP */
400 		ecmd->supported = SUPPORTED_40000baseSR4_Full |
401 				  SUPPORTED_40000baseCR4_Full |
402 				  SUPPORTED_40000baseLR4_Full;
403 		ecmd->advertising = ADVERTISED_40000baseSR4_Full |
404 				    ADVERTISED_40000baseCR4_Full |
405 				    ADVERTISED_40000baseLR4_Full;
406 		break;
407 	case I40E_DEV_ID_KX_B:
408 		/* backplane 40G */
409 		ecmd->supported = SUPPORTED_40000baseKR4_Full;
410 		ecmd->advertising = ADVERTISED_40000baseKR4_Full;
411 		break;
412 	case I40E_DEV_ID_KX_C:
413 		/* backplane 10G */
414 		ecmd->supported = SUPPORTED_10000baseKR_Full;
415 		ecmd->advertising = ADVERTISED_10000baseKR_Full;
416 		break;
417 	case I40E_DEV_ID_10G_BASE_T:
418 		ecmd->supported = SUPPORTED_10000baseT_Full |
419 				  SUPPORTED_1000baseT_Full |
420 				  SUPPORTED_100baseT_Full;
421 		/* Figure out what has been requested */
422 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
423 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
424 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
425 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
426 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
427 			ecmd->advertising |= ADVERTISED_100baseT_Full;
428 		break;
429 	case I40E_DEV_ID_20G_KR2:
430 		/* backplane 20G */
431 		ecmd->supported = SUPPORTED_20000baseKR2_Full;
432 		ecmd->advertising = ADVERTISED_20000baseKR2_Full;
433 		break;
434 	default:
435 		/* all the rest are 10G/1G */
436 		ecmd->supported = SUPPORTED_10000baseT_Full |
437 				  SUPPORTED_1000baseT_Full;
438 		/* Figure out what has been requested */
439 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
440 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
441 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
442 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
443 		break;
444 	}
445 
	/* With no link, speed and duplex are unknown */
447 	ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
448 	ecmd->duplex = DUPLEX_UNKNOWN;
449 }
450 
451 /**
452  * i40e_get_settings - Get Link Speed and Duplex settings
453  * @netdev: network interface device structure
454  * @ecmd: ethtool command
455  *
456  * Reports speed/duplex settings based on media_type
457  **/
458 static int i40e_get_settings(struct net_device *netdev,
459 			     struct ethtool_cmd *ecmd)
460 {
461 	struct i40e_netdev_priv *np = netdev_priv(netdev);
462 	struct i40e_pf *pf = np->vsi->back;
463 	struct i40e_hw *hw = &pf->hw;
464 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
465 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
466 
467 	if (link_up)
468 		i40e_get_settings_link_up(hw, ecmd, netdev);
469 	else
470 		i40e_get_settings_link_down(hw, ecmd);
471 
472 	/* Now set the settings that don't rely on link being up/down */
473 
474 	/* Set autoneg settings */
475 	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
476 			  AUTONEG_ENABLE : AUTONEG_DISABLE);
477 
478 	switch (hw->phy.media_type) {
479 	case I40E_MEDIA_TYPE_BACKPLANE:
480 		ecmd->supported |= SUPPORTED_Autoneg |
481 				   SUPPORTED_Backplane;
482 		ecmd->advertising |= ADVERTISED_Autoneg |
483 				     ADVERTISED_Backplane;
484 		ecmd->port = PORT_NONE;
485 		break;
486 	case I40E_MEDIA_TYPE_BASET:
487 		ecmd->supported |= SUPPORTED_TP;
488 		ecmd->advertising |= ADVERTISED_TP;
489 		ecmd->port = PORT_TP;
490 		break;
491 	case I40E_MEDIA_TYPE_DA:
492 	case I40E_MEDIA_TYPE_CX4:
493 		ecmd->supported |= SUPPORTED_FIBRE;
494 		ecmd->advertising |= ADVERTISED_FIBRE;
495 		ecmd->port = PORT_DA;
496 		break;
497 	case I40E_MEDIA_TYPE_FIBER:
498 		ecmd->supported |= SUPPORTED_FIBRE;
499 		ecmd->port = PORT_FIBRE;
500 		break;
501 	case I40E_MEDIA_TYPE_UNKNOWN:
502 	default:
503 		ecmd->port = PORT_OTHER;
504 		break;
505 	}
506 
507 	/* Set transceiver */
508 	ecmd->transceiver = XCVR_EXTERNAL;
509 
510 	/* Set flow control settings */
511 	ecmd->supported |= SUPPORTED_Pause;
512 
513 	switch (hw->fc.requested_mode) {
514 	case I40E_FC_FULL:
515 		ecmd->advertising |= ADVERTISED_Pause;
516 		break;
517 	case I40E_FC_TX_PAUSE:
518 		ecmd->advertising |= ADVERTISED_Asym_Pause;
519 		break;
520 	case I40E_FC_RX_PAUSE:
521 		ecmd->advertising |= (ADVERTISED_Pause |
522 				      ADVERTISED_Asym_Pause);
523 		break;
524 	default:
525 		ecmd->advertising &= ~(ADVERTISED_Pause |
526 				       ADVERTISED_Asym_Pause);
527 		break;
528 	}
529 
530 	return 0;
531 }
532 
533 /**
534  * i40e_set_settings - Set Speed and Duplex
535  * @netdev: network interface device structure
536  * @ecmd: ethtool command
537  *
538  * Set speed/duplex per media_types advertised/forced
539  **/
540 static int i40e_set_settings(struct net_device *netdev,
541 			     struct ethtool_cmd *ecmd)
542 {
543 	struct i40e_netdev_priv *np = netdev_priv(netdev);
544 	struct i40e_aq_get_phy_abilities_resp abilities;
545 	struct i40e_aq_set_phy_config config;
546 	struct i40e_pf *pf = np->vsi->back;
547 	struct i40e_vsi *vsi = np->vsi;
548 	struct i40e_hw *hw = &pf->hw;
549 	struct ethtool_cmd safe_ecmd;
550 	i40e_status status = 0;
551 	bool change = false;
552 	int err = 0;
553 	u8 autoneg;
554 	u32 advertise;
555 
556 	/* Changing port settings is not supported if this isn't the
557 	 * port's controlling PF
558 	 */
559 	if (hw->partition_id != 1) {
560 		i40e_partition_setting_complaint(pf);
561 		return -EOPNOTSUPP;
562 	}
563 
564 	if (vsi != pf->vsi[pf->lan_vsi])
565 		return -EOPNOTSUPP;
566 
567 	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
568 	    hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
569 	    hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
570 	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
571 		return -EOPNOTSUPP;
572 
573 	/* get our own copy of the bits to check against */
574 	memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
575 	i40e_get_settings(netdev, &safe_ecmd);
576 
577 	/* save autoneg and speed out of ecmd */
578 	autoneg = ecmd->autoneg;
579 	advertise = ecmd->advertising;
580 
581 	/* set autoneg and speed back to what they currently are */
582 	ecmd->autoneg = safe_ecmd.autoneg;
583 	ecmd->advertising = safe_ecmd.advertising;
584 
585 	ecmd->cmd = safe_ecmd.cmd;
586 	/* If ecmd and safe_ecmd are not the same now, then they are
587 	 * trying to set something that we do not support
588 	 */
589 	if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
590 		return -EOPNOTSUPP;
591 
592 	while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
593 		usleep_range(1000, 2000);
594 
595 	/* Get the current phy config */
596 	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
597 					      NULL);
598 	if (status)
599 		return -EAGAIN;
600 
601 	/* Copy abilities to config in case autoneg is not
602 	 * set below
603 	 */
604 	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
605 	config.abilities = abilities.abilities;
606 
607 	/* Check autoneg */
608 	if (autoneg == AUTONEG_ENABLE) {
609 		/* If autoneg is not supported, return error */
610 		if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
611 			netdev_info(netdev, "Autoneg not supported on this phy\n");
612 			return -EINVAL;
613 		}
614 		/* If autoneg was not already enabled */
615 		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
616 			config.abilities = abilities.abilities |
617 					   I40E_AQ_PHY_ENABLE_AN;
618 			change = true;
619 		}
620 	} else {
		/* If autoneg is supported, 10GBASE_T is the only PHY that
		 * can disable it, so return an error for any other PHY
		 */
624 		if (safe_ecmd.supported & SUPPORTED_Autoneg &&
625 		    hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
626 			netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
627 			return -EINVAL;
628 		}
629 		/* If autoneg is currently enabled */
630 		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
631 			config.abilities = abilities.abilities &
632 					   ~I40E_AQ_PHY_ENABLE_AN;
633 			change = true;
634 		}
635 	}
636 
637 	if (advertise & ~safe_ecmd.supported)
638 		return -EINVAL;
639 
640 	if (advertise & ADVERTISED_100baseT_Full)
641 		config.link_speed |= I40E_LINK_SPEED_100MB;
642 	if (advertise & ADVERTISED_1000baseT_Full ||
643 	    advertise & ADVERTISED_1000baseKX_Full)
644 		config.link_speed |= I40E_LINK_SPEED_1GB;
645 	if (advertise & ADVERTISED_10000baseT_Full ||
646 	    advertise & ADVERTISED_10000baseKX4_Full ||
647 	    advertise & ADVERTISED_10000baseKR_Full)
648 		config.link_speed |= I40E_LINK_SPEED_10GB;
649 	if (advertise & ADVERTISED_20000baseKR2_Full)
650 		config.link_speed |= I40E_LINK_SPEED_20GB;
651 	if (advertise & ADVERTISED_40000baseKR4_Full ||
652 	    advertise & ADVERTISED_40000baseCR4_Full ||
653 	    advertise & ADVERTISED_40000baseSR4_Full ||
654 	    advertise & ADVERTISED_40000baseLR4_Full)
655 		config.link_speed |= I40E_LINK_SPEED_40GB;
656 
657 	if (change || (abilities.link_speed != config.link_speed)) {
658 		/* copy over the rest of the abilities */
659 		config.phy_type = abilities.phy_type;
660 		config.eee_capability = abilities.eee_capability;
661 		config.eeer = abilities.eeer_val;
662 		config.low_power_ctrl = abilities.d3_lpan;
663 
664 		/* save the requested speeds */
665 		hw->phy.link_info.requested_speeds = config.link_speed;
666 		/* set link and auto negotiation so changes take effect */
667 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
668 		/* If link is up put link down */
669 		if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
670 			/* Tell the OS link is going down, the link will go
671 			 * back up when fw says it is ready asynchronously
672 			 */
673 			netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
674 			netif_carrier_off(netdev);
675 			netif_tx_stop_all_queues(netdev);
676 		}
677 
678 		/* make the aq call */
679 		status = i40e_aq_set_phy_config(hw, &config, NULL);
680 		if (status) {
681 			netdev_info(netdev, "Set phy config failed with error %d.\n",
682 				    status);
683 			return -EAGAIN;
684 		}
685 
686 		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
687 		if (status)
688 			netdev_info(netdev, "Updating link info failed with error %d\n",
689 				    status);
690 
691 	} else {
692 		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
693 	}
694 
695 	return err;
696 }
697 
698 static int i40e_nway_reset(struct net_device *netdev)
699 {
700 	/* restart autonegotiation */
701 	struct i40e_netdev_priv *np = netdev_priv(netdev);
702 	struct i40e_pf *pf = np->vsi->back;
703 	struct i40e_hw *hw = &pf->hw;
704 	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
705 	i40e_status ret = 0;
706 
707 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
708 	if (ret) {
709 		netdev_info(netdev, "link restart failed, aq_err=%d\n",
710 			    pf->hw.aq.asq_last_status);
711 		return -EIO;
712 	}
713 
714 	return 0;
715 }
716 
717 /**
 * i40e_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: buffer in which to report the Tx/Rx pause status
720  **/
721 static void i40e_get_pauseparam(struct net_device *netdev,
722 				struct ethtool_pauseparam *pause)
723 {
724 	struct i40e_netdev_priv *np = netdev_priv(netdev);
725 	struct i40e_pf *pf = np->vsi->back;
726 	struct i40e_hw *hw = &pf->hw;
727 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
728 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
729 
730 	pause->autoneg =
731 		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
732 		  AUTONEG_ENABLE : AUTONEG_DISABLE);
733 
734 	/* PFC enabled so report LFC as off */
735 	if (dcbx_cfg->pfc.pfcenable) {
736 		pause->rx_pause = 0;
737 		pause->tx_pause = 0;
738 		return;
739 	}
740 
741 	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
742 		pause->rx_pause = 1;
743 	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
744 		pause->tx_pause = 1;
745 	} else if (hw->fc.current_mode == I40E_FC_FULL) {
746 		pause->rx_pause = 1;
747 		pause->tx_pause = 1;
748 	}
749 }
750 
751 /**
752  * i40e_set_pauseparam - Set Flow Control parameter
753  * @netdev: network interface device structure
 * @pause: requested Tx/Rx flow control configuration
755  **/
756 static int i40e_set_pauseparam(struct net_device *netdev,
757 			       struct ethtool_pauseparam *pause)
758 {
759 	struct i40e_netdev_priv *np = netdev_priv(netdev);
760 	struct i40e_pf *pf = np->vsi->back;
761 	struct i40e_vsi *vsi = np->vsi;
762 	struct i40e_hw *hw = &pf->hw;
763 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
764 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
765 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
766 	i40e_status status;
767 	u8 aq_failures;
768 	int err = 0;
769 
770 	/* Changing the port's flow control is not supported if this isn't the
771 	 * port's controlling PF
772 	 */
773 	if (hw->partition_id != 1) {
774 		i40e_partition_setting_complaint(pf);
775 		return -EOPNOTSUPP;
776 	}
777 
778 	if (vsi != pf->vsi[pf->lan_vsi])
779 		return -EOPNOTSUPP;
780 
781 	if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
782 	    AUTONEG_ENABLE : AUTONEG_DISABLE)) {
783 		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
784 		return -EOPNOTSUPP;
785 	}
786 
787 	/* If we have link and don't have autoneg */
788 	if (!test_bit(__I40E_DOWN, &pf->state) &&
789 	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
		/* Send a message that it might not necessarily take effect */
791 		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
792 	}
793 
794 	if (dcbx_cfg->pfc.pfcenable) {
795 		netdev_info(netdev,
796 			    "Priority flow control enabled. Cannot set link flow control.\n");
797 		return -EOPNOTSUPP;
798 	}
799 
800 	if (pause->rx_pause && pause->tx_pause)
801 		hw->fc.requested_mode = I40E_FC_FULL;
802 	else if (pause->rx_pause && !pause->tx_pause)
803 		hw->fc.requested_mode = I40E_FC_RX_PAUSE;
804 	else if (!pause->rx_pause && pause->tx_pause)
805 		hw->fc.requested_mode = I40E_FC_TX_PAUSE;
806 	else if (!pause->rx_pause && !pause->tx_pause)
807 		hw->fc.requested_mode = I40E_FC_NONE;
808 	else
809 		 return -EINVAL;
810 
811 	/* Tell the OS link is going down, the link will go back up when fw
812 	 * says it is ready asynchronously
813 	 */
814 	netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
815 	netif_carrier_off(netdev);
816 	netif_tx_stop_all_queues(netdev);
817 
	/* Set the FC mode and only restart autoneg if link is up */
819 	status = i40e_set_fc(hw, &aq_failures, link_up);
820 
821 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
822 		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
823 			    status, hw->aq.asq_last_status);
824 		err = -EAGAIN;
825 	}
826 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
827 		netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
828 			    status, hw->aq.asq_last_status);
829 		err = -EAGAIN;
830 	}
831 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
832 		netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
833 			    status, hw->aq.asq_last_status);
834 		err = -EAGAIN;
835 	}
836 
837 	if (!test_bit(__I40E_DOWN, &pf->state)) {
838 		/* Give it a little more time to try to come back */
839 		msleep(75);
840 		if (!test_bit(__I40E_DOWN, &pf->state))
841 			return i40e_nway_reset(netdev);
842 	}
843 
844 	return err;
845 }
846 
847 static u32 i40e_get_msglevel(struct net_device *netdev)
848 {
849 	struct i40e_netdev_priv *np = netdev_priv(netdev);
850 	struct i40e_pf *pf = np->vsi->back;
851 
852 	return pf->msg_enable;
853 }
854 
855 static void i40e_set_msglevel(struct net_device *netdev, u32 data)
856 {
857 	struct i40e_netdev_priv *np = netdev_priv(netdev);
858 	struct i40e_pf *pf = np->vsi->back;
859 
860 	if (I40E_DEBUG_USER & data)
861 		pf->hw.debug_mask = data;
862 	pf->msg_enable = data;
863 }
864 
865 static int i40e_get_regs_len(struct net_device *netdev)
866 {
867 	int reg_count = 0;
868 	int i;
869 
870 	for (i = 0; i40e_reg_list[i].offset != 0; i++)
871 		reg_count += i40e_reg_list[i].elements;
872 
873 	return reg_count * sizeof(u32);
874 }
875 
876 static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
877 			  void *p)
878 {
879 	struct i40e_netdev_priv *np = netdev_priv(netdev);
880 	struct i40e_pf *pf = np->vsi->back;
881 	struct i40e_hw *hw = &pf->hw;
882 	u32 *reg_buf = p;
883 	int i, j, ri;
884 	u32 reg;
885 
886 	/* Tell ethtool which driver-version-specific regs output we have.
887 	 *
888 	 * At some point, if we have ethtool doing special formatting of
889 	 * this data, it will rely on this version number to know how to
890 	 * interpret things.  Hence, this needs to be updated if/when the
891 	 * diags register table is changed.
892 	 */
893 	regs->version = 1;
894 
895 	/* loop through the diags reg table for what to print */
896 	ri = 0;
897 	for (i = 0; i40e_reg_list[i].offset != 0; i++) {
898 		for (j = 0; j < i40e_reg_list[i].elements; j++) {
899 			reg = i40e_reg_list[i].offset
900 				+ (j * i40e_reg_list[i].stride);
901 			reg_buf[ri++] = rd32(hw, reg);
902 		}
903 	}
904 
905 }
906 
907 static int i40e_get_eeprom(struct net_device *netdev,
908 			   struct ethtool_eeprom *eeprom, u8 *bytes)
909 {
910 	struct i40e_netdev_priv *np = netdev_priv(netdev);
911 	struct i40e_hw *hw = &np->vsi->back->hw;
912 	struct i40e_pf *pf = np->vsi->back;
913 	int ret_val = 0, len, offset;
914 	u8 *eeprom_buff;
915 	u16 i, sectors;
916 	bool last;
917 	u32 magic;
918 
919 #define I40E_NVM_SECTOR_SIZE  4096
920 	if (eeprom->len == 0)
921 		return -EINVAL;
922 
923 	/* check for NVMUpdate access method */
924 	magic = hw->vendor_id | (hw->device_id << 16);
925 	if (eeprom->magic && eeprom->magic != magic) {
926 		struct i40e_nvm_access *cmd;
927 		int errno;
928 
929 		/* make sure it is the right magic for NVMUpdate */
930 		if ((eeprom->magic >> 16) != hw->device_id)
931 			return -EINVAL;
932 
933 		cmd = (struct i40e_nvm_access *)eeprom;
934 		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
935 		if (ret_val &&
936 		    ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
937 		     (hw->debug_mask & I40E_DEBUG_NVM)))
938 			dev_info(&pf->pdev->dev,
939 				 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
940 				 ret_val, hw->aq.asq_last_status, errno,
941 				 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
942 				 cmd->offset, cmd->data_size);
943 
944 		return errno;
945 	}
946 
947 	/* normal ethtool get_eeprom support */
948 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
949 
950 	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
951 	if (!eeprom_buff)
952 		return -ENOMEM;
953 
954 	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
955 	if (ret_val) {
956 		dev_info(&pf->pdev->dev,
957 			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
958 			 ret_val, hw->aq.asq_last_status);
959 		goto free_buff;
960 	}
961 
962 	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
963 	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
964 	len = I40E_NVM_SECTOR_SIZE;
965 	last = false;
966 	for (i = 0; i < sectors; i++) {
967 		if (i == (sectors - 1)) {
968 			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
969 			last = true;
970 		}
		offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
972 		ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
973 				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
974 				last, NULL);
975 		if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
976 			dev_info(&pf->pdev->dev,
977 				 "read NVM failed, invalid offset 0x%x\n",
978 				 offset);
979 			break;
980 		} else if (ret_val &&
981 			   hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
982 			dev_info(&pf->pdev->dev,
983 				 "read NVM failed, access, offset 0x%x\n",
984 				 offset);
985 			break;
986 		} else if (ret_val) {
987 			dev_info(&pf->pdev->dev,
988 				 "read NVM failed offset %d err=%d status=0x%x\n",
989 				 offset, ret_val, hw->aq.asq_last_status);
990 			break;
991 		}
992 	}
993 
994 	i40e_release_nvm(hw);
995 	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
996 free_buff:
997 	kfree(eeprom_buff);
998 	return ret_val;
999 }
1000 
1001 static int i40e_get_eeprom_len(struct net_device *netdev)
1002 {
1003 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1004 	struct i40e_hw *hw = &np->vsi->back->hw;
1005 	u32 val;
1006 
1007 	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
1008 		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
1009 		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
	/* register returns the flash size as a power of 2 of 64KB chunks */
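	/* e.g. a register field of 3 means (64 * 1024) * (1 << 3) = 512KB */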
1011 	val = (64 * 1024) * (1 << val);
1012 	return val;
1013 }
1014 
1015 static int i40e_set_eeprom(struct net_device *netdev,
1016 			   struct ethtool_eeprom *eeprom, u8 *bytes)
1017 {
1018 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1019 	struct i40e_hw *hw = &np->vsi->back->hw;
1020 	struct i40e_pf *pf = np->vsi->back;
1021 	struct i40e_nvm_access *cmd;
1022 	int ret_val = 0;
1023 	int errno;
1024 	u32 magic;
1025 
1026 	/* normal ethtool set_eeprom is not supported */
1027 	magic = hw->vendor_id | (hw->device_id << 16);
1028 	if (eeprom->magic == magic)
1029 		return -EOPNOTSUPP;
1030 
1031 	/* check for NVMUpdate access method */
1032 	if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
1033 		return -EINVAL;
1034 
1035 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
1036 	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
1037 		return -EBUSY;
1038 
1039 	cmd = (struct i40e_nvm_access *)eeprom;
1040 	ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
1041 	if (ret_val &&
1042 	    ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
1043 	      hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
1044 	     (hw->debug_mask & I40E_DEBUG_NVM)))
1045 		dev_info(&pf->pdev->dev,
1046 			 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
1047 			 ret_val, hw->aq.asq_last_status, errno,
1048 			 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
1049 			 cmd->offset, cmd->data_size);
1050 
1051 	return errno;
1052 }
1053 
1054 static void i40e_get_drvinfo(struct net_device *netdev,
1055 			     struct ethtool_drvinfo *drvinfo)
1056 {
1057 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1058 	struct i40e_vsi *vsi = np->vsi;
1059 	struct i40e_pf *pf = vsi->back;
1060 
1061 	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
1062 	strlcpy(drvinfo->version, i40e_driver_version_str,
1063 		sizeof(drvinfo->version));
1064 	strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
1065 		sizeof(drvinfo->fw_version));
1066 	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
1067 		sizeof(drvinfo->bus_info));
1068 	drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
1069 }
1070 
1071 static void i40e_get_ringparam(struct net_device *netdev,
1072 			       struct ethtool_ringparam *ring)
1073 {
1074 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1075 	struct i40e_pf *pf = np->vsi->back;
1076 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
1077 
1078 	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1079 	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1080 	ring->rx_mini_max_pending = 0;
1081 	ring->rx_jumbo_max_pending = 0;
1082 	ring->rx_pending = vsi->rx_rings[0]->count;
1083 	ring->tx_pending = vsi->tx_rings[0]->count;
1084 	ring->rx_mini_pending = 0;
1085 	ring->rx_jumbo_pending = 0;
1086 }
1087 
1088 static int i40e_set_ringparam(struct net_device *netdev,
1089 			      struct ethtool_ringparam *ring)
1090 {
1091 	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
1092 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1093 	struct i40e_vsi *vsi = np->vsi;
1094 	struct i40e_pf *pf = vsi->back;
1095 	u32 new_rx_count, new_tx_count;
1096 	int i, err = 0;
1097 
1098 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1099 		return -EINVAL;
1100 
1101 	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1102 	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
1103 	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1104 	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
1105 		netdev_info(netdev,
1106 			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
1107 			    ring->tx_pending, ring->rx_pending,
1108 			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
1109 		return -EINVAL;
1110 	}
1111 
1112 	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1113 	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1114 
1115 	/* if nothing to do return success */
1116 	if ((new_tx_count == vsi->tx_rings[0]->count) &&
1117 	    (new_rx_count == vsi->rx_rings[0]->count))
1118 		return 0;
1119 
1120 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
1121 		usleep_range(1000, 2000);
1122 
1123 	if (!netif_running(vsi->netdev)) {
1124 		/* simple case - set for the next time the netdev is started */
1125 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1126 			vsi->tx_rings[i]->count = new_tx_count;
1127 			vsi->rx_rings[i]->count = new_rx_count;
1128 		}
1129 		goto done;
1130 	}
1131 
1132 	/* We can't just free everything and then setup again,
1133 	 * because the ISRs in MSI-X mode get passed pointers
1134 	 * to the Tx and Rx ring structs.
1135 	 */
1136 
1137 	/* alloc updated Tx resources */
1138 	if (new_tx_count != vsi->tx_rings[0]->count) {
1139 		netdev_info(netdev,
1140 			    "Changing Tx descriptor count from %d to %d.\n",
1141 			    vsi->tx_rings[0]->count, new_tx_count);
1142 		tx_rings = kcalloc(vsi->alloc_queue_pairs,
1143 				   sizeof(struct i40e_ring), GFP_KERNEL);
1144 		if (!tx_rings) {
1145 			err = -ENOMEM;
1146 			goto done;
1147 		}
1148 
1149 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1150 			/* clone ring and setup updated count */
1151 			tx_rings[i] = *vsi->tx_rings[i];
1152 			tx_rings[i].count = new_tx_count;
1153 			err = i40e_setup_tx_descriptors(&tx_rings[i]);
1154 			if (err) {
1155 				while (i) {
1156 					i--;
1157 					i40e_free_tx_resources(&tx_rings[i]);
1158 				}
1159 				kfree(tx_rings);
1160 				tx_rings = NULL;
1161 
1162 				goto done;
1163 			}
1164 		}
1165 	}
1166 
1167 	/* alloc updated Rx resources */
1168 	if (new_rx_count != vsi->rx_rings[0]->count) {
1169 		netdev_info(netdev,
1170 			    "Changing Rx descriptor count from %d to %d\n",
1171 			    vsi->rx_rings[0]->count, new_rx_count);
1172 		rx_rings = kcalloc(vsi->alloc_queue_pairs,
1173 				   sizeof(struct i40e_ring), GFP_KERNEL);
1174 		if (!rx_rings) {
1175 			err = -ENOMEM;
1176 			goto free_tx;
1177 		}
1178 
1179 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1180 			/* clone ring and setup updated count */
1181 			rx_rings[i] = *vsi->rx_rings[i];
1182 			rx_rings[i].count = new_rx_count;
1183 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
1184 			if (err) {
1185 				while (i) {
1186 					i--;
1187 					i40e_free_rx_resources(&rx_rings[i]);
1188 				}
1189 				kfree(rx_rings);
1190 				rx_rings = NULL;
1191 
1192 				goto free_tx;
1193 			}
1194 		}
1195 	}
1196 
1197 	/* Bring interface down, copy in the new ring info,
1198 	 * then restore the interface
1199 	 */
1200 	i40e_down(vsi);
1201 
1202 	if (tx_rings) {
1203 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1204 			i40e_free_tx_resources(vsi->tx_rings[i]);
1205 			*vsi->tx_rings[i] = tx_rings[i];
1206 		}
1207 		kfree(tx_rings);
1208 		tx_rings = NULL;
1209 	}
1210 
1211 	if (rx_rings) {
1212 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1213 			i40e_free_rx_resources(vsi->rx_rings[i]);
1214 			*vsi->rx_rings[i] = rx_rings[i];
1215 		}
1216 		kfree(rx_rings);
1217 		rx_rings = NULL;
1218 	}
1219 
1220 	i40e_up(vsi);
1221 
1222 free_tx:
1223 	/* error cleanup if the Rx allocations failed after getting Tx */
1224 	if (tx_rings) {
1225 		for (i = 0; i < vsi->num_queue_pairs; i++)
1226 			i40e_free_tx_resources(&tx_rings[i]);
1227 		kfree(tx_rings);
1228 		tx_rings = NULL;
1229 	}
1230 
1231 done:
1232 	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
1233 
1234 	return err;
1235 }
1236 
1237 static int i40e_get_sset_count(struct net_device *netdev, int sset)
1238 {
1239 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1240 	struct i40e_vsi *vsi = np->vsi;
1241 	struct i40e_pf *pf = vsi->back;
1242 
1243 	switch (sset) {
1244 	case ETH_SS_TEST:
1245 		return I40E_TEST_LEN;
1246 	case ETH_SS_STATS:
1247 		if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
1248 			int len = I40E_PF_STATS_LEN(netdev);
1249 
1250 			if (pf->lan_veb != I40E_NO_VEB)
1251 				len += I40E_VEB_STATS_LEN;
1252 			return len;
1253 		} else {
1254 			return I40E_VSI_STATS_LEN(netdev);
1255 		}
1256 	case ETH_SS_PRIV_FLAGS:
1257 		return I40E_PRIV_FLAGS_STR_LEN;
1258 	default:
1259 		return -EOPNOTSUPP;
1260 	}
1261 }
1262 
1263 static void i40e_get_ethtool_stats(struct net_device *netdev,
1264 				   struct ethtool_stats *stats, u64 *data)
1265 {
1266 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1267 	struct i40e_ring *tx_ring, *rx_ring;
1268 	struct i40e_vsi *vsi = np->vsi;
1269 	struct i40e_pf *pf = vsi->back;
1270 	int i = 0;
1271 	char *p;
1272 	int j;
1273 	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
1274 	unsigned int start;
1275 
1276 	i40e_update_stats(vsi);
1277 
1278 	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
1279 		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
1280 		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
1281 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1282 	}
1283 	for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
1284 		p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
1285 		data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
1286 			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1287 	}
1288 #ifdef I40E_FCOE
1289 	for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
1290 		p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
1291 		data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
1292 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1293 	}
1294 #endif
1295 	rcu_read_lock();
1296 	for (j = 0; j < vsi->num_queue_pairs; j++) {
1297 		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
1298 
1299 		if (!tx_ring)
1300 			continue;
1301 
1302 		/* process Tx ring statistics */
1303 		do {
1304 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
1305 			data[i] = tx_ring->stats.packets;
1306 			data[i + 1] = tx_ring->stats.bytes;
1307 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
1308 		i += 2;
1309 
1310 		/* Rx ring is the 2nd half of the queue pair */
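		/* This assumes the Tx and Rx rings of a queue pair are
		 * allocated back to back, so stepping one i40e_ring past
		 * the Tx ring lands on its partner Rx ring.
		 */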
1311 		rx_ring = &tx_ring[1];
1312 		do {
1313 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
1314 			data[i] = rx_ring->stats.packets;
1315 			data[i + 1] = rx_ring->stats.bytes;
1316 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
1317 		i += 2;
1318 	}
1319 	rcu_read_unlock();
1320 	if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
1321 		return;
1322 
1323 	if (pf->lan_veb != I40E_NO_VEB) {
1324 		struct i40e_veb *veb = pf->veb[pf->lan_veb];
1325 		for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
1326 			p = (char *)veb;
1327 			p += i40e_gstrings_veb_stats[j].stat_offset;
1328 			data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
1329 				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1330 		}
1331 	}
1332 	for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
1333 		p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
1334 		data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
1335 			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1336 	}
1337 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
1338 		data[i++] = pf->stats.priority_xon_tx[j];
1339 		data[i++] = pf->stats.priority_xoff_tx[j];
1340 	}
1341 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
1342 		data[i++] = pf->stats.priority_xon_rx[j];
1343 		data[i++] = pf->stats.priority_xoff_rx[j];
1344 	}
1345 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
1346 		data[i++] = pf->stats.priority_xon_2_xoff[j];
1347 }
1348 
1349 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
1350 			     u8 *data)
1351 {
1352 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1353 	struct i40e_vsi *vsi = np->vsi;
1354 	struct i40e_pf *pf = vsi->back;
1355 	char *p = (char *)data;
1356 	int i;
1357 
1358 	switch (stringset) {
1359 	case ETH_SS_TEST:
1360 		for (i = 0; i < I40E_TEST_LEN; i++) {
1361 			memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
1362 			data += ETH_GSTRING_LEN;
1363 		}
1364 		break;
1365 	case ETH_SS_STATS:
1366 		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
1367 			snprintf(p, ETH_GSTRING_LEN, "%s",
1368 				 i40e_gstrings_net_stats[i].stat_string);
1369 			p += ETH_GSTRING_LEN;
1370 		}
1371 		for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
1372 			snprintf(p, ETH_GSTRING_LEN, "%s",
1373 				 i40e_gstrings_misc_stats[i].stat_string);
1374 			p += ETH_GSTRING_LEN;
1375 		}
1376 #ifdef I40E_FCOE
1377 		for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
1378 			snprintf(p, ETH_GSTRING_LEN, "%s",
1379 				 i40e_gstrings_fcoe_stats[i].stat_string);
1380 			p += ETH_GSTRING_LEN;
1381 		}
1382 #endif
1383 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1384 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
1385 			p += ETH_GSTRING_LEN;
1386 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
1387 			p += ETH_GSTRING_LEN;
1388 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
1389 			p += ETH_GSTRING_LEN;
1390 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
1391 			p += ETH_GSTRING_LEN;
1392 		}
1393 		if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
1394 			return;
1395 
1396 		if (pf->lan_veb != I40E_NO_VEB) {
1397 			for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
1398 				snprintf(p, ETH_GSTRING_LEN, "veb.%s",
1399 					i40e_gstrings_veb_stats[i].stat_string);
1400 				p += ETH_GSTRING_LEN;
1401 			}
1402 		}
1403 		for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
1404 			snprintf(p, ETH_GSTRING_LEN, "port.%s",
1405 				 i40e_gstrings_stats[i].stat_string);
1406 			p += ETH_GSTRING_LEN;
1407 		}
1408 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1409 			snprintf(p, ETH_GSTRING_LEN,
1410 				 "port.tx_priority_%u_xon", i);
1411 			p += ETH_GSTRING_LEN;
1412 			snprintf(p, ETH_GSTRING_LEN,
1413 				 "port.tx_priority_%u_xoff", i);
1414 			p += ETH_GSTRING_LEN;
1415 		}
1416 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1417 			snprintf(p, ETH_GSTRING_LEN,
1418 				 "port.rx_priority_%u_xon", i);
1419 			p += ETH_GSTRING_LEN;
1420 			snprintf(p, ETH_GSTRING_LEN,
1421 				 "port.rx_priority_%u_xoff", i);
1422 			p += ETH_GSTRING_LEN;
1423 		}
1424 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1425 			snprintf(p, ETH_GSTRING_LEN,
1426 				 "port.rx_priority_%u_xon_2_xoff", i);
1427 			p += ETH_GSTRING_LEN;
1428 		}
1429 		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
1430 		break;
1431 	case ETH_SS_PRIV_FLAGS:
1432 		for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
1433 			memcpy(data, i40e_priv_flags_strings[i],
1434 			       ETH_GSTRING_LEN);
1435 			data += ETH_GSTRING_LEN;
1436 		}
1437 		break;
1438 	default:
1439 		break;
1440 	}
1441 }
1442 
1443 static int i40e_get_ts_info(struct net_device *dev,
1444 			    struct ethtool_ts_info *info)
1445 {
1446 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
1447 
1448 	/* only report HW timestamping if PTP is enabled */
1449 	if (!(pf->flags & I40E_FLAG_PTP))
1450 		return ethtool_op_get_ts_info(dev, info);
1451 
1452 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1453 				SOF_TIMESTAMPING_RX_SOFTWARE |
1454 				SOF_TIMESTAMPING_SOFTWARE |
1455 				SOF_TIMESTAMPING_TX_HARDWARE |
1456 				SOF_TIMESTAMPING_RX_HARDWARE |
1457 				SOF_TIMESTAMPING_RAW_HARDWARE;
1458 
1459 	if (pf->ptp_clock)
1460 		info->phc_index = ptp_clock_index(pf->ptp_clock);
1461 	else
1462 		info->phc_index = -1;
1463 
1464 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1465 
1466 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1467 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1468 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1469 			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
1470 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1471 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1472 			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
1473 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1474 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1475 			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
1476 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1477 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1478 
1479 	return 0;
1480 }
1481 
1482 static int i40e_link_test(struct net_device *netdev, u64 *data)
1483 {
1484 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1485 	struct i40e_pf *pf = np->vsi->back;
1486 
1487 	netif_info(pf, hw, netdev, "link test\n");
1488 	if (i40e_get_link_status(&pf->hw))
1489 		*data = 0;
1490 	else
1491 		*data = 1;
1492 
1493 	return *data;
1494 }
1495 
1496 static int i40e_reg_test(struct net_device *netdev, u64 *data)
1497 {
1498 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1499 	struct i40e_pf *pf = np->vsi->back;
1500 
1501 	netif_info(pf, hw, netdev, "register test\n");
1502 	*data = i40e_diag_reg_test(&pf->hw);
1503 
1504 	return *data;
1505 }
1506 
1507 static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
1508 {
1509 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1510 	struct i40e_pf *pf = np->vsi->back;
1511 
1512 	netif_info(pf, hw, netdev, "eeprom test\n");
1513 	*data = i40e_diag_eeprom_test(&pf->hw);
1514 
	/* forcibly clear the NVM Update state machine */
1516 	pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;
1517 
1518 	return *data;
1519 }
1520 
1521 static int i40e_intr_test(struct net_device *netdev, u64 *data)
1522 {
1523 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1524 	struct i40e_pf *pf = np->vsi->back;
1525 	u16 swc_old = pf->sw_int_count;
1526 
1527 	netif_info(pf, hw, netdev, "interrupt test\n");
1528 	wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
1529 	     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
1530 	      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1531 	      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
1532 	      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
1533 	      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
1534 	usleep_range(1000, 2000);
1535 	*data = (swc_old == pf->sw_int_count);
1536 
1537 	return *data;
1538 }
1539 
1540 static int i40e_loopback_test(struct net_device *netdev, u64 *data)
1541 {
1542 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1543 	struct i40e_pf *pf = np->vsi->back;
1544 
1545 	netif_info(pf, hw, netdev, "loopback test not implemented\n");
1546 	*data = 0;
1547 
1548 	return *data;
1549 }
1550 
1551 static void i40e_diag_test(struct net_device *netdev,
1552 			   struct ethtool_test *eth_test, u64 *data)
1553 {
1554 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1555 	bool if_running = netif_running(netdev);
1556 	struct i40e_pf *pf = np->vsi->back;
1557 
1558 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1559 		/* Offline tests */
1560 		netif_info(pf, drv, netdev, "offline testing starting\n");
1561 
1562 		set_bit(__I40E_TESTING, &pf->state);
1563 		/* If the device is online then take it offline */
1564 		if (if_running)
1565 			/* indicate we're in test mode */
1566 			dev_close(netdev);
1567 		else
1568 			i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
1569 
1570 		/* Link test performed before hardware reset
1571 		 * so autoneg doesn't interfere with test result
1572 		 */
1573 		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
1574 			eth_test->flags |= ETH_TEST_FL_FAILED;
1575 
1576 		if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
1577 			eth_test->flags |= ETH_TEST_FL_FAILED;
1578 
1579 		if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
1580 			eth_test->flags |= ETH_TEST_FL_FAILED;
1581 
1582 		if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
1583 			eth_test->flags |= ETH_TEST_FL_FAILED;
1584 
1585 		/* run reg test last, a reset is required after it */
1586 		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
1587 			eth_test->flags |= ETH_TEST_FL_FAILED;
1588 
1589 		clear_bit(__I40E_TESTING, &pf->state);
1590 		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
1591 
1592 		if (if_running)
1593 			dev_open(netdev);
1594 	} else {
1595 		/* Online tests */
1596 		netif_info(pf, drv, netdev, "online testing starting\n");
1597 
1598 		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
1599 			eth_test->flags |= ETH_TEST_FL_FAILED;
1600 
		/* Offline-only tests are not run online; pass by default */
1602 		data[I40E_ETH_TEST_REG] = 0;
1603 		data[I40E_ETH_TEST_EEPROM] = 0;
1604 		data[I40E_ETH_TEST_INTR] = 0;
1605 		data[I40E_ETH_TEST_LOOPBACK] = 0;
1606 	}
1607 
1608 	netif_info(pf, drv, netdev, "testing finished\n");
1609 }
1610 
1611 static void i40e_get_wol(struct net_device *netdev,
1612 			 struct ethtool_wolinfo *wol)
1613 {
1614 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1615 	struct i40e_pf *pf = np->vsi->back;
1616 	struct i40e_hw *hw = &pf->hw;
1617 	u16 wol_nvm_bits;
1618 
1619 	/* NVM bit on means WoL disabled for the port */
1620 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1621 	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
1622 		wol->supported = 0;
1623 		wol->wolopts = 0;
1624 	} else {
1625 		wol->supported = WAKE_MAGIC;
1626 		wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
1627 	}
1628 }
1629 
1630 /**
1631  * i40e_set_wol - set the WakeOnLAN configuration
1632  * @netdev: the netdev in question
1633  * @wol: the ethtool WoL setting data
1634  **/
1635 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1636 {
1637 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1638 	struct i40e_pf *pf = np->vsi->back;
1639 	struct i40e_vsi *vsi = np->vsi;
1640 	struct i40e_hw *hw = &pf->hw;
1641 	u16 wol_nvm_bits;
1642 
1643 	/* WoL not supported if this isn't the controlling PF on the port */
1644 	if (hw->partition_id != 1) {
1645 		i40e_partition_setting_complaint(pf);
1646 		return -EOPNOTSUPP;
1647 	}
1648 
1649 	if (vsi != pf->vsi[pf->lan_vsi])
1650 		return -EOPNOTSUPP;
1651 
1652 	/* NVM bit on means WoL disabled for the port */
1653 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1654 	if (((1 << hw->port) & wol_nvm_bits))
1655 		return -EOPNOTSUPP;
1656 
1657 	/* only magic packet is supported */
1658 	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
1659 		return -EOPNOTSUPP;
1660 
1661 	/* is this a new value? */
1662 	if (pf->wol_en != !!wol->wolopts) {
1663 		pf->wol_en = !!wol->wolopts;
1664 		device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
1665 	}
1666 
1667 	return 0;
1668 }
1669 
1670 static int i40e_set_phys_id(struct net_device *netdev,
1671 			    enum ethtool_phys_id_state state)
1672 {
1673 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1674 	struct i40e_pf *pf = np->vsi->back;
1675 	struct i40e_hw *hw = &pf->hw;
1676 	int blink_freq = 2;
1677 
1678 	switch (state) {
1679 	case ETHTOOL_ID_ACTIVE:
1680 		pf->led_status = i40e_led_get(hw);
1681 		return blink_freq;
1682 	case ETHTOOL_ID_ON:
1683 		i40e_led_set(hw, 0xF, false);
1684 		break;
1685 	case ETHTOOL_ID_OFF:
1686 		i40e_led_set(hw, 0x0, false);
1687 		break;
1688 	case ETHTOOL_ID_INACTIVE:
1689 		i40e_led_set(hw, pf->led_status, false);
1690 		break;
1691 	default:
1692 		break;
1693 	}
1694 
1695 	return 0;
1696 }
1697 
/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
 * Throttle Rate (ITR), i.e. ITR(1) = 2us and ITR(10) = 20us, so
 * 125us (8000 interrupts per second) corresponds to ITR(62)
 */
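/* As a rough illustration (assuming ITR_TO_REG() simply halves the stored
 * usecs value): a request of "ethtool -C <iface> rx-usecs 50" is stored as
 * rx_itr_setting = 50 and programmed into the Rx ITR register as 25.
 */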
1702 
1703 static int i40e_get_coalesce(struct net_device *netdev,
1704 			     struct ethtool_coalesce *ec)
1705 {
1706 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1707 	struct i40e_vsi *vsi = np->vsi;
1708 
1709 	ec->tx_max_coalesced_frames_irq = vsi->work_limit;
1710 	ec->rx_max_coalesced_frames_irq = vsi->work_limit;
1711 
1712 	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
1713 		ec->use_adaptive_rx_coalesce = 1;
1714 
1715 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1716 		ec->use_adaptive_tx_coalesce = 1;
1717 
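	/* Report the programmed usecs value with the adaptive (ITR dynamic)
	 * flag masked off
	 */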
1718 	ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
1719 	ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
1720 
1721 	return 0;
1722 }
1723 
1724 static int i40e_set_coalesce(struct net_device *netdev,
1725 			     struct ethtool_coalesce *ec)
1726 {
1727 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1728 	struct i40e_q_vector *q_vector;
1729 	struct i40e_vsi *vsi = np->vsi;
1730 	struct i40e_pf *pf = vsi->back;
1731 	struct i40e_hw *hw = &pf->hw;
1732 	u16 vector;
1733 	int i;
1734 
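	/* Only the Tx frames-irq value is stored as the VSI work limit;
	 * i40e_get_coalesce reports it for both Tx and Rx
	 */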
1735 	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
1736 		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
1737 
1738 	vector = vsi->base_vector;
1739 	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
1740 	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
1741 		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1742 	} else if (ec->rx_coalesce_usecs == 0) {
1743 		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1744 		if (ec->use_adaptive_rx_coalesce)
1745 			netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
1746 	} else {
1747 		netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
1748 		return -EINVAL;
1749 	}
1750 
1751 	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
1752 	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
1753 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1754 	} else if (ec->tx_coalesce_usecs == 0) {
1755 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1756 		if (ec->use_adaptive_tx_coalesce)
1757 			netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
1758 	} else {
1759 		netif_info(pf, drv, netdev,
1760 			   "Invalid value, tx-usecs range is 0-8160\n");
1761 		return -EINVAL;
1762 	}
1763 
1764 	if (ec->use_adaptive_rx_coalesce)
1765 		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
1766 	else
1767 		vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
1768 
1769 	if (ec->use_adaptive_tx_coalesce)
1770 		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
1771 	else
1772 		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
1773 
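	/* Push the updated ITR values out to every queue vector's Rx and Tx
	 * ITR registers
	 */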
1774 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
1775 		q_vector = vsi->q_vectors[i];
1776 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
1777 		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
1778 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
1779 		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
1780 		i40e_flush(hw);
1781 	}
1782 
1783 	return 0;
1784 }
1785 
1786 /**
1787  * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
1788  * @pf: pointer to the physical function struct
1789  * @cmd: ethtool rxnfc command
1790  *
1791  * Returns Success if the flow is supported, else Invalid Input.
1792  **/
1793 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
1794 {
1795 	cmd->data = 0;
1796 
1797 	if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
1798 		cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
1799 		cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
1800 		return 0;
1801 	}
1802 	/* Report default options for RSS on i40e */
1803 	switch (cmd->flow_type) {
1804 	case TCP_V4_FLOW:
1805 	case UDP_V4_FLOW:
1806 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1807 	/* fall through to add IP fields */
1808 	case SCTP_V4_FLOW:
1809 	case AH_ESP_V4_FLOW:
1810 	case AH_V4_FLOW:
1811 	case ESP_V4_FLOW:
1812 	case IPV4_FLOW:
1813 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1814 		break;
1815 	case TCP_V6_FLOW:
1816 	case UDP_V6_FLOW:
1817 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1818 	/* fall through to add IP fields */
1819 	case SCTP_V6_FLOW:
1820 	case AH_ESP_V6_FLOW:
1821 	case AH_V6_FLOW:
1822 	case ESP_V6_FLOW:
1823 	case IPV6_FLOW:
1824 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1825 		break;
1826 	default:
1827 		return -EINVAL;
1828 	}
1829 
1830 	return 0;
1831 }
1832 
1833 /**
1834  * i40e_get_ethtool_fdir_all - Populates the rule count of a command
1835  * @pf: Pointer to the physical function struct
1836  * @cmd: The command to get or set Rx flow classification rules
1837  * @rule_locs: Array of used rule locations
1838  *
1839  * This function populates both the total and actual rule count of
1840  * the ethtool flow classification command
1841  *
 * Returns 0 on success or -EMSGSIZE if the rule_locs array cannot hold
 * all active rules
1843  **/
1844 static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
1845 				     struct ethtool_rxnfc *cmd,
1846 				     u32 *rule_locs)
1847 {
1848 	struct i40e_fdir_filter *rule;
1849 	struct hlist_node *node2;
1850 	int cnt = 0;
1851 
1852 	/* report total rule count */
1853 	cmd->data = i40e_get_fd_cnt_all(pf);
1854 
1855 	hlist_for_each_entry_safe(rule, node2,
1856 				  &pf->fdir_filter_list, fdir_node) {
1857 		if (cnt == cmd->rule_cnt)
1858 			return -EMSGSIZE;
1859 
1860 		rule_locs[cnt] = rule->fd_id;
1861 		cnt++;
1862 	}
1863 
1864 	cmd->rule_cnt = cnt;
1865 
1866 	return 0;
1867 }
1868 
1869 /**
1870  * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
1871  * @pf: Pointer to the physical function struct
1872  * @cmd: The command to get or set Rx flow classification rules
1873  *
1874  * This function looks up a filter based on the Rx flow classification
1875  * command and fills the flow spec info for it if found
1876  *
1877  * Returns 0 on success or -EINVAL if filter not found
1878  **/
1879 static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1880 				       struct ethtool_rxnfc *cmd)
1881 {
1882 	struct ethtool_rx_flow_spec *fsp =
1883 			(struct ethtool_rx_flow_spec *)&cmd->fs;
1884 	struct i40e_fdir_filter *rule = NULL;
1885 	struct hlist_node *node2;
1886 
1887 	hlist_for_each_entry_safe(rule, node2,
1888 				  &pf->fdir_filter_list, fdir_node) {
1889 		if (fsp->location <= rule->fd_id)
1890 			break;
1891 	}
1892 
1893 	if (!rule || fsp->location != rule->fd_id)
1894 		return -EINVAL;
1895 
1896 	fsp->flow_type = rule->flow_type;
1897 	if (fsp->flow_type == IP_USER_FLOW) {
1898 		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
1899 		fsp->h_u.usr_ip4_spec.proto = 0;
1900 		fsp->m_u.usr_ip4_spec.proto = 0;
1901 	}
1902 
	/* Reverse the src and dest notion, since the HW views them from
	 * the Tx perspective whereas the user expects them from the Rx
	 * filter view.
	 */
1906 	fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
1907 	fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
1908 	fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
1909 	fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
1910 
1911 	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
1912 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
1913 	else
1914 		fsp->ring_cookie = rule->q_index;
1915 
1916 	if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
1917 		struct i40e_vsi *vsi;
1918 
1919 		vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
1920 		if (vsi && vsi->type == I40E_VSI_SRIOV) {
1921 			fsp->h_ext.data[1] = htonl(vsi->vf_id);
1922 			fsp->m_ext.data[1] = htonl(0x1);
1923 		}
1924 	}
1925 
1926 	return 0;
1927 }
1928 
1929 /**
1930  * i40e_get_rxnfc - command to get RX flow classification rules
1931  * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: array in which to store the locations of the active rules
1933  *
1934  * Returns Success if the command is supported.
1935  **/
1936 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1937 			  u32 *rule_locs)
1938 {
1939 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1940 	struct i40e_vsi *vsi = np->vsi;
1941 	struct i40e_pf *pf = vsi->back;
1942 	int ret = -EOPNOTSUPP;
1943 
1944 	switch (cmd->cmd) {
1945 	case ETHTOOL_GRXRINGS:
1946 		cmd->data = vsi->alloc_queue_pairs;
1947 		ret = 0;
1948 		break;
1949 	case ETHTOOL_GRXFH:
1950 		ret = i40e_get_rss_hash_opts(pf, cmd);
1951 		break;
1952 	case ETHTOOL_GRXCLSRLCNT:
1953 		cmd->rule_cnt = pf->fdir_pf_active_filters;
1954 		/* report total rule count */
1955 		cmd->data = i40e_get_fd_cnt_all(pf);
1956 		ret = 0;
1957 		break;
1958 	case ETHTOOL_GRXCLSRULE:
1959 		ret = i40e_get_ethtool_fdir_entry(pf, cmd);
1960 		break;
1961 	case ETHTOOL_GRXCLSRLALL:
1962 		ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
1963 		break;
1964 	default:
1965 		break;
1966 	}
1967 
1968 	return ret;
1969 }
1970 
1971 /**
1972  * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
1973  * @pf: pointer to the physical function struct
 * @nfc: ethtool rxnfc command
1975  *
1976  * Returns Success if the flow input set is supported.
1977  **/
1978 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1979 {
1980 	struct i40e_hw *hw = &pf->hw;
1981 	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
1982 		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
1983 
1984 	/* RSS does not support anything other than hashing
1985 	 * to queues on src and dst IPs and ports
1986 	 */
1987 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
1988 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
1989 		return -EINVAL;
1990 
1991 	/* We need at least the IP SRC and DEST fields for hashing */
1992 	if (!(nfc->data & RXH_IP_SRC) ||
1993 	    !(nfc->data & RXH_IP_DST))
1994 		return -EINVAL;
1995 
1996 	switch (nfc->flow_type) {
1997 	case TCP_V4_FLOW:
1998 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1999 		case 0:
2000 			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2001 			break;
2002 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2003 			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2004 			break;
2005 		default:
2006 			return -EINVAL;
2007 		}
2008 		break;
2009 	case TCP_V6_FLOW:
2010 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2011 		case 0:
2012 			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2013 			break;
2014 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2015 			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2016 			break;
2017 		default:
2018 			return -EINVAL;
2019 		}
2020 		break;
2021 	case UDP_V4_FLOW:
2022 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2023 		case 0:
2024 			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2025 				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
2026 			break;
2027 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2028 			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2029 				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
2030 			break;
2031 		default:
2032 			return -EINVAL;
2033 		}
2034 		break;
2035 	case UDP_V6_FLOW:
2036 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2037 		case 0:
2038 			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2039 				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
2040 			break;
2041 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2042 			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2043 				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
2044 			break;
2045 		default:
2046 			return -EINVAL;
2047 		}
2048 		break;
2049 	case AH_ESP_V4_FLOW:
2050 	case AH_V4_FLOW:
2051 	case ESP_V4_FLOW:
2052 	case SCTP_V4_FLOW:
2053 		if ((nfc->data & RXH_L4_B_0_1) ||
2054 		    (nfc->data & RXH_L4_B_2_3))
2055 			return -EINVAL;
2056 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2057 		break;
2058 	case AH_ESP_V6_FLOW:
2059 	case AH_V6_FLOW:
2060 	case ESP_V6_FLOW:
2061 	case SCTP_V6_FLOW:
2062 		if ((nfc->data & RXH_L4_B_0_1) ||
2063 		    (nfc->data & RXH_L4_B_2_3))
2064 			return -EINVAL;
2065 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2066 		break;
2067 	case IPV4_FLOW:
2068 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2069 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
2070 		break;
2071 	case IPV6_FLOW:
2072 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2073 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2074 		break;
2075 	default:
2076 		return -EINVAL;
2077 	}
2078 
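	/* Commit the updated hash-enable (HENA) bitmap to the two 32-bit
	 * PFQF_HENA registers
	 */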
2079 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
2080 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
2081 	i40e_flush(hw);
2082 
2083 	/* Save setting for future output/update */
2084 	pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
2085 
2086 	return 0;
2087 }
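/* For reference (assuming standard ethtool syntax), a request such as
 * "ethtool -N <iface> rx-flow-hash tcp4 sdfn" asks for src/dst IP plus both
 * L4 port halves and maps to setting the NONF_IPV4_TCP PCTYPE bit above.
 */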
2088 
2089 /**
2090  * i40e_match_fdir_input_set - Match a new filter against an existing one
2091  * @rule: The filter already added
 * @input: The new filter to compare against
 *
 * Returns true if the two input sets match
2095  **/
2096 static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
2097 				      struct i40e_fdir_filter *input)
2098 {
2099 	if ((rule->dst_ip[0] != input->dst_ip[0]) ||
2100 	    (rule->src_ip[0] != input->src_ip[0]) ||
2101 	    (rule->dst_port != input->dst_port) ||
2102 	    (rule->src_port != input->src_port))
2103 		return false;
2104 	return true;
2105 }
2106 
2107 /**
2108  * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
2109  * @vsi: Pointer to the targeted VSI
2110  * @input: The filter to update or NULL to indicate deletion
2111  * @sw_idx: Software index to the filter
2112  * @cmd: The command to get or set Rx flow classification rules
2113  *
2114  * This function updates (or deletes) a Flow Director entry from
2115  * the hlist of the corresponding PF
2116  *
2117  * Returns 0 on success
2118  **/
2119 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
2120 					  struct i40e_fdir_filter *input,
2121 					  u16 sw_idx,
2122 					  struct ethtool_rxnfc *cmd)
2123 {
2124 	struct i40e_fdir_filter *rule, *parent;
2125 	struct i40e_pf *pf = vsi->back;
2126 	struct hlist_node *node2;
2127 	int err = -EINVAL;
2128 
2129 	parent = NULL;
2130 	rule = NULL;
2131 
2132 	hlist_for_each_entry_safe(rule, node2,
2133 				  &pf->fdir_filter_list, fdir_node) {
2134 		/* hash found, or no matching entry */
2135 		if (rule->fd_id >= sw_idx)
2136 			break;
2137 		parent = rule;
2138 	}
2139 
2140 	/* if there is an old rule occupying our place remove it */
2141 	if (rule && (rule->fd_id == sw_idx)) {
2142 		if (input && !i40e_match_fdir_input_set(rule, input))
2143 			err = i40e_add_del_fdir(vsi, rule, false);
2144 		else if (!input)
2145 			err = i40e_add_del_fdir(vsi, rule, false);
2146 		hlist_del(&rule->fdir_node);
2147 		kfree(rule);
2148 		pf->fdir_pf_active_filters--;
2149 	}
2150 
	/* If there is no input this was a delete; err is 0 if a rule was
	 * successfully found and removed from the list, else -EINVAL.
	 */
2154 	if (!input)
2155 		return err;
2156 
2157 	/* initialize node and set software index */
2158 	INIT_HLIST_NODE(&input->fdir_node);
2159 
2160 	/* add filter to the list */
2161 	if (parent)
2162 		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2163 	else
2164 		hlist_add_head(&input->fdir_node,
2165 			       &pf->fdir_filter_list);
2166 
2167 	/* update counts */
2168 	pf->fdir_pf_active_filters++;
2169 
2170 	return 0;
2171 }
2172 
2173 /**
2174  * i40e_del_fdir_entry - Deletes a Flow Director filter entry
2175  * @vsi: Pointer to the targeted VSI
2176  * @cmd: The command to get or set Rx flow classification rules
2177  *
2178  * The function removes a Flow Director filter entry from the
2179  * hlist of the corresponding PF
2180  *
2181  * Returns 0 on success
 **/
2183 static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
2184 			       struct ethtool_rxnfc *cmd)
2185 {
2186 	struct ethtool_rx_flow_spec *fsp =
2187 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2188 	struct i40e_pf *pf = vsi->back;
2189 	int ret = 0;
2190 
2191 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
2192 	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
2193 		return -EBUSY;
2194 
2195 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
2196 		return -EBUSY;
2197 
2198 	ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
2199 
2200 	i40e_fdir_check_and_reenable(pf);
2201 	return ret;
2202 }
2203 
2204 /**
2205  * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
2206  * @vsi: pointer to the targeted VSI
2207  * @cmd: command to get or set RX flow classification rules
2208  *
2209  * Add Flow Director filters for a specific flow spec based on their
2210  * protocol.  Returns 0 if the filters were successfully added.
2211  **/
2212 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
2213 				 struct ethtool_rxnfc *cmd)
2214 {
2215 	struct ethtool_rx_flow_spec *fsp;
2216 	struct i40e_fdir_filter *input;
2217 	struct i40e_pf *pf;
2218 	int ret = -EINVAL;
2219 	u16 vf_id;
2220 
2221 	if (!vsi)
2222 		return -EINVAL;
2223 
2224 	pf = vsi->back;
2225 
2226 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2227 		return -EOPNOTSUPP;
2228 
2229 	if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
2230 		return -ENOSPC;
2231 
2232 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
2233 	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
2234 		return -EBUSY;
2235 
2236 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
2237 		return -EBUSY;
2238 
2239 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
2240 
2241 	if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
2242 			      pf->hw.func_caps.fd_filters_guaranteed)) {
2243 		return -EINVAL;
2244 	}
2245 
2246 	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
2247 	    (fsp->ring_cookie >= vsi->num_queue_pairs))
2248 		return -EINVAL;
2249 
2250 	input = kzalloc(sizeof(*input), GFP_KERNEL);
2251 
2252 	if (!input)
2253 		return -ENOMEM;
2254 
2255 	input->fd_id = fsp->location;
2256 
2257 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2258 		input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
2259 	else
2260 		input->dest_ctl =
2261 			     I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
2262 
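	/* Copy the remaining filter parameters from the ethtool flow spec */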
2263 	input->q_index = fsp->ring_cookie;
2264 	input->flex_off = 0;
2265 	input->pctype = 0;
2266 	input->dest_vsi = vsi->id;
2267 	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
2268 	input->cnt_index  = pf->fd_sb_cnt_idx;
2269 	input->flow_type = fsp->flow_type;
2270 	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
2271 
	/* Reverse the src and dest notion, since the HW expects them to be
	 * from the Tx perspective whereas the input from the user is from
	 * the Rx filter view.
	 */
2275 	input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
2276 	input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
2277 	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2278 	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2279 
2280 	if (ntohl(fsp->m_ext.data[1])) {
2281 		if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
2282 			netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
2283 			goto free_input;
2284 		}
2285 		vf_id = ntohl(fsp->h_ext.data[1]);
		/* Find the VSI id from the VF id and override the dest VSI */
2287 		input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
2288 		if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
2289 			netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
2290 			goto free_input;
2291 		}
2292 	}
2293 
2294 	ret = i40e_add_del_fdir(vsi, input, true);
2295 free_input:
2296 	if (ret)
2297 		kfree(input);
2298 	else
2299 		i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
2300 
2301 	return ret;
2302 }
2303 
2304 /**
2305  * i40e_set_rxnfc - command to set RX flow classification rules
2306  * @netdev: network interface device structure
2307  * @cmd: ethtool rxnfc command
2308  *
2309  * Returns Success if the command is supported.
2310  **/
2311 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
2312 {
2313 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2314 	struct i40e_vsi *vsi = np->vsi;
2315 	struct i40e_pf *pf = vsi->back;
2316 	int ret = -EOPNOTSUPP;
2317 
2318 	switch (cmd->cmd) {
2319 	case ETHTOOL_SRXFH:
2320 		ret = i40e_set_rss_hash_opt(pf, cmd);
2321 		break;
2322 	case ETHTOOL_SRXCLSRLINS:
2323 		ret = i40e_add_fdir_ethtool(vsi, cmd);
2324 		break;
2325 	case ETHTOOL_SRXCLSRLDEL:
2326 		ret = i40e_del_fdir_entry(vsi, cmd);
2327 		break;
2328 	default:
2329 		break;
2330 	}
2331 
2332 	return ret;
2333 }
2334 
2335 /**
2336  * i40e_max_channels - get Max number of combined channels supported
2337  * @vsi: vsi pointer
2338  **/
2339 static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
2340 {
	/* TODO: This code assumes DCB and FD are disabled for now. */
2342 	return vsi->alloc_queue_pairs;
2343 }
2344 
2345 /**
 * i40e_get_channels - Get the current and maximum supported channels
 * @dev: network interface device structure
2348  * @ch: ethtool channels structure
2349  *
2350  * We don't support separate tx and rx queues as channels. The other count
2351  * represents how many queues are being used for control. max_combined counts
2352  * how many queue pairs we can support. They may not be mapped 1 to 1 with
2353  * q_vectors since we support a lot more queue pairs than q_vectors.
2354  **/
2355 static void i40e_get_channels(struct net_device *dev,
2356 			       struct ethtool_channels *ch)
2357 {
2358 	struct i40e_netdev_priv *np = netdev_priv(dev);
2359 	struct i40e_vsi *vsi = np->vsi;
2360 	struct i40e_pf *pf = vsi->back;
2361 
2362 	/* report maximum channels */
2363 	ch->max_combined = i40e_max_channels(vsi);
2364 
2365 	/* report info for other vector */
2366 	ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
2367 	ch->max_other = ch->other_count;
2368 
2369 	/* Note: This code assumes DCB is disabled for now. */
2370 	ch->combined_count = vsi->num_queue_pairs;
2371 }
2372 
2373 /**
 * i40e_set_channels - Set the new channel count
 * @dev: network interface device structure
2376  * @ch: ethtool channels structure
2377  *
 * The new channel count may not be the same as the one requested by the
 * user, since it gets rounded down to a power of 2.
2380  **/
2381 static int i40e_set_channels(struct net_device *dev,
2382 			      struct ethtool_channels *ch)
2383 {
2384 	struct i40e_netdev_priv *np = netdev_priv(dev);
2385 	unsigned int count = ch->combined_count;
2386 	struct i40e_vsi *vsi = np->vsi;
2387 	struct i40e_pf *pf = vsi->back;
2388 	int new_count;
2389 
2390 	/* We do not support setting channels for any other VSI at present */
2391 	if (vsi->type != I40E_VSI_MAIN)
2392 		return -EINVAL;
2393 
2394 	/* verify they are not requesting separate vectors */
2395 	if (!count || ch->rx_count || ch->tx_count)
2396 		return -EINVAL;
2397 
2398 	/* verify other_count has not changed */
2399 	if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
2400 		return -EINVAL;
2401 
2402 	/* verify the number of channels does not exceed hardware limits */
2403 	if (count > i40e_max_channels(vsi))
2404 		return -EINVAL;
2405 
2406 	/* update feature limits from largest to smallest supported values */
2407 	/* TODO: Flow director limit, DCB etc */
2408 
2409 	/* use rss_reconfig to rebuild with new queue count and update traffic
2410 	 * class queue mapping
2411 	 */
2412 	new_count = i40e_reconfig_rss_queues(pf, count);
2413 	if (new_count > 0)
2414 		return 0;
2415 	else
2416 		return -EINVAL;
2417 }
2418 
2419 #define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
2420 /**
2421  * i40e_get_rxfh_key_size - get the RSS hash key size
2422  * @netdev: network interface device structure
2423  *
 * Returns the RSS hash key size.
2425  **/
2426 static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
2427 {
2428 	return I40E_HKEY_ARRAY_SIZE;
2429 }
2430 
2431 /**
2432  * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
2433  * @netdev: network interface device structure
2434  *
2435  * Returns the table size.
2436  **/
2437 static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
2438 {
2439 	return I40E_HLUT_ARRAY_SIZE;
2440 }
2441 
2442 static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2443 			 u8 *hfunc)
2444 {
2445 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2446 	struct i40e_vsi *vsi = np->vsi;
2447 	struct i40e_pf *pf = vsi->back;
2448 	struct i40e_hw *hw = &pf->hw;
2449 	u32 reg_val;
2450 	int i, j;
2451 
2452 	if (hfunc)
2453 		*hfunc = ETH_RSS_HASH_TOP;
2454 
2455 	if (!indir)
2456 		return 0;
2457 
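	/* Each 32-bit PFQF_HLUT register packs four 8-bit LUT entries,
	 * least-significant byte first
	 */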
2458 	for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
2459 		reg_val = rd32(hw, I40E_PFQF_HLUT(i));
2460 		indir[j++] = reg_val & 0xff;
2461 		indir[j++] = (reg_val >> 8) & 0xff;
2462 		indir[j++] = (reg_val >> 16) & 0xff;
2463 		indir[j++] = (reg_val >> 24) & 0xff;
2464 	}
2465 
2466 	if (key) {
2467 		for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
2468 			reg_val = rd32(hw, I40E_PFQF_HKEY(i));
2469 			key[j++] = (u8)(reg_val & 0xff);
2470 			key[j++] = (u8)((reg_val >> 8) & 0xff);
2471 			key[j++] = (u8)((reg_val >> 16) & 0xff);
2472 			key[j++] = (u8)((reg_val >> 24) & 0xff);
2473 		}
2474 	}
2475 	return 0;
2476 }
2477 
2478 /**
2479  * i40e_set_rxfh - set the rx flow hash indirection table
2480  * @netdev: network interface device structure
2481  * @indir: indirection table
2482  * @key: hash key
2483  *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
2485  * returns 0 after programming the table.
2486  **/
2487 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
2488 			 const u8 *key, const u8 hfunc)
2489 {
2490 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2491 	struct i40e_vsi *vsi = np->vsi;
2492 	struct i40e_pf *pf = vsi->back;
2493 	struct i40e_hw *hw = &pf->hw;
2494 	u32 reg_val;
2495 	int i, j;
2496 
2497 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
2498 		return -EOPNOTSUPP;
2499 
2500 	if (!indir)
2501 		return 0;
2502 
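	/* Repack four 8-bit LUT entries into each 32-bit PFQF_HLUT register,
	 * least-significant byte first
	 */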
2503 	for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
2504 		reg_val = indir[j++];
2505 		reg_val |= indir[j++] << 8;
2506 		reg_val |= indir[j++] << 16;
2507 		reg_val |= indir[j++] << 24;
2508 		wr32(hw, I40E_PFQF_HLUT(i), reg_val);
2509 	}
2510 
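	/* The RSS hash key is written the same way, four bytes per
	 * PFQF_HKEY register
	 */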
2511 	if (key) {
2512 		for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
2513 			reg_val = key[j++];
2514 			reg_val |= key[j++] << 8;
2515 			reg_val |= key[j++] << 16;
2516 			reg_val |= key[j++] << 24;
2517 			wr32(hw, I40E_PFQF_HKEY(i), reg_val);
2518 		}
2519 	}
2520 	return 0;
2521 }
2522 
2523 /**
2524  * i40e_get_priv_flags - report device private flags
2525  * @dev: network interface device structure
2526  *
 * The string set count reported by get_sset_count and the string set itself
 * must be kept in sync for each flag returned.  Add new strings for each
 * flag to the i40e_priv_flags_strings array.
2530  *
2531  * Returns a u32 bitmap of flags.
2532  **/
2533 static u32 i40e_get_priv_flags(struct net_device *dev)
2534 {
2535 	struct i40e_netdev_priv *np = netdev_priv(dev);
2536 	struct i40e_vsi *vsi = np->vsi;
2537 	struct i40e_pf *pf = vsi->back;
2538 	u32 ret_flags = 0;
2539 
2540 	ret_flags |= pf->hw.func_caps.npar_enable ?
2541 		I40E_PRIV_FLAGS_NPAR_FLAG : 0;
2542 
2543 	return ret_flags;
2544 }
2545 
2546 static const struct ethtool_ops i40e_ethtool_ops = {
2547 	.get_settings		= i40e_get_settings,
2548 	.set_settings		= i40e_set_settings,
2549 	.get_drvinfo		= i40e_get_drvinfo,
2550 	.get_regs_len		= i40e_get_regs_len,
2551 	.get_regs		= i40e_get_regs,
2552 	.nway_reset		= i40e_nway_reset,
2553 	.get_link		= ethtool_op_get_link,
2554 	.get_wol		= i40e_get_wol,
2555 	.set_wol		= i40e_set_wol,
2556 	.set_eeprom		= i40e_set_eeprom,
2557 	.get_eeprom_len		= i40e_get_eeprom_len,
2558 	.get_eeprom		= i40e_get_eeprom,
2559 	.get_ringparam		= i40e_get_ringparam,
2560 	.set_ringparam		= i40e_set_ringparam,
2561 	.get_pauseparam		= i40e_get_pauseparam,
2562 	.set_pauseparam		= i40e_set_pauseparam,
2563 	.get_msglevel		= i40e_get_msglevel,
2564 	.set_msglevel		= i40e_set_msglevel,
2565 	.get_rxnfc		= i40e_get_rxnfc,
2566 	.set_rxnfc		= i40e_set_rxnfc,
2567 	.self_test		= i40e_diag_test,
2568 	.get_strings		= i40e_get_strings,
2569 	.set_phys_id		= i40e_set_phys_id,
2570 	.get_sset_count		= i40e_get_sset_count,
2571 	.get_ethtool_stats	= i40e_get_ethtool_stats,
2572 	.get_coalesce		= i40e_get_coalesce,
2573 	.set_coalesce		= i40e_set_coalesce,
2574 	.get_rxfh_key_size	= i40e_get_rxfh_key_size,
2575 	.get_rxfh_indir_size	= i40e_get_rxfh_indir_size,
2576 	.get_rxfh		= i40e_get_rxfh,
2577 	.set_rxfh		= i40e_set_rxfh,
2578 	.get_channels		= i40e_get_channels,
2579 	.set_channels		= i40e_set_channels,
2580 	.get_ts_info		= i40e_get_ts_info,
2581 	.get_priv_flags		= i40e_get_priv_flags,
2582 };
2583 
2584 void i40e_set_ethtool_ops(struct net_device *netdev)
2585 {
2586 	netdev->ethtool_ops = &i40e_ethtool_ops;
2587 }
2588