1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2014 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 /* ethtool support for i40e */
28 
29 #include "i40e.h"
30 #include "i40e_diag.h"
31 
32 struct i40e_stats {
33 	char stat_string[ETH_GSTRING_LEN];
34 	int sizeof_stat;
35 	int stat_offset;
36 };
37 
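/* The I40E_STAT() helpers below record a stat's display name along with
 * the size and offset of the backing field, so the loops in
 * i40e_get_ethtool_stats() can copy each counter out of the relevant
 * structure generically.
 */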
38 #define I40E_STAT(_type, _name, _stat) { \
39 	.stat_string = _name, \
40 	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
41 	.stat_offset = offsetof(_type, _stat) \
42 }
43 
44 #define I40E_NETDEV_STAT(_net_stat) \
45 		I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
46 #define I40E_PF_STAT(_name, _stat) \
47 		I40E_STAT(struct i40e_pf, _name, _stat)
48 #define I40E_VSI_STAT(_name, _stat) \
49 		I40E_STAT(struct i40e_vsi, _name, _stat)
50 #define I40E_VEB_STAT(_name, _stat) \
51 		I40E_STAT(struct i40e_veb, _name, _stat)
52 
53 static const struct i40e_stats i40e_gstrings_net_stats[] = {
54 	I40E_NETDEV_STAT(rx_packets),
55 	I40E_NETDEV_STAT(tx_packets),
56 	I40E_NETDEV_STAT(rx_bytes),
57 	I40E_NETDEV_STAT(tx_bytes),
58 	I40E_NETDEV_STAT(rx_errors),
59 	I40E_NETDEV_STAT(tx_errors),
60 	I40E_NETDEV_STAT(rx_dropped),
61 	I40E_NETDEV_STAT(tx_dropped),
62 	I40E_NETDEV_STAT(collisions),
63 	I40E_NETDEV_STAT(rx_length_errors),
64 	I40E_NETDEV_STAT(rx_crc_errors),
65 };
66 
67 static const struct i40e_stats i40e_gstrings_veb_stats[] = {
68 	I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
69 	I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
70 	I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
71 	I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
72 	I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
73 	I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
74 	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
75 	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
76 	I40E_VEB_STAT("rx_discards", stats.rx_discards),
77 	I40E_VEB_STAT("tx_discards", stats.tx_discards),
78 	I40E_VEB_STAT("tx_errors", stats.tx_errors),
79 	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
80 };
81 
82 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
83 	I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
84 	I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
85 	I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
86 	I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
87 	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
88 	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
89 	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
90 };
91 
92 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
93 				 struct ethtool_rxnfc *cmd);
94 
95 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
96  * but they are separate.  This device supports Virtualization, and
97  * as such might have several netdevs supporting VMDq and FCoE going
98  * through a single port.  The NETDEV_STATs are for individual netdevs
99  * seen at the top of the stack, and the PF_STATs are for the physical
100  * function at the bottom of the stack hosting those netdevs.
101  *
102  * The PF_STATs are appended to the netdev stats only when ethtool -S
103  * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
104  */
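/* Example: on the base PF netdev, `ethtool -S` reports these counters
 * with a "port." prefix (e.g. port.rx_bytes) after the per-netdev and
 * per-queue stats; see i40e_get_strings().
 */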
105 static struct i40e_stats i40e_gstrings_stats[] = {
106 	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
107 	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
108 	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
109 	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
110 	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
111 	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
112 	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
113 	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
114 	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
115 	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
116 	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
117 	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
118 	I40E_PF_STAT("crc_errors", stats.crc_errors),
119 	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
120 	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
121 	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
122 	I40E_PF_STAT("tx_timeout", tx_timeout_count),
123 	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
124 	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
125 	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
126 	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
127 	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
128 	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
129 	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
130 	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
131 	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
132 	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
133 	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
134 	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
135 	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
136 	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
137 	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
138 	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
139 	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
140 	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
141 	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
142 	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
143 	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
144 	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
145 	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
146 	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
147 	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
148 	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
149 	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
150 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
151 	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
152 
153 	/* LPI stats */
154 	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
155 	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
156 	I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
157 	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
158 };
159 
160 #ifdef I40E_FCOE
161 static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
162 	I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
163 	I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
164 	I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
165 	I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
166 	I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
167 	I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
168 	I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
169 	I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
170 };
171 
172 #endif /* I40E_FCOE */
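/* Each queue pair contributes Tx and Rx packet/byte counters
 * (struct i40e_queue_stats), reported per ring as tx-N.tx_packets,
 * tx-N.tx_bytes, rx-N.rx_packets and rx-N.rx_bytes.
 */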
173 #define I40E_QUEUE_STATS_LEN(n) \
174 	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
175 	    * 2 /* Tx and Rx together */                                     \
176 	    * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
177 #define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
178 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
179 #define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
180 #ifdef I40E_FCOE
181 #define I40E_FCOE_STATS_LEN	ARRAY_SIZE(i40e_gstrings_fcoe_stats)
182 #define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
183 				 I40E_FCOE_STATS_LEN + \
184 				 I40E_MISC_STATS_LEN + \
185 				 I40E_QUEUE_STATS_LEN((n)))
186 #else
187 #define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
188 				 I40E_MISC_STATS_LEN + \
189 				 I40E_QUEUE_STATS_LEN((n)))
190 #endif /* I40E_FCOE */
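/* PFC stats are five per-priority counter arrays in struct i40e_pf;
 * their combined size divided by sizeof(u64) gives the number of u64
 * entries reported in i40e_get_ethtool_stats().
 */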
191 #define I40E_PFC_STATS_LEN ( \
192 		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
193 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
194 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
195 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
196 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
197 		 / sizeof(u64))
198 #define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
199 #define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
200 				 I40E_PFC_STATS_LEN + \
201 				 I40E_VSI_STATS_LEN((n)))
202 
203 enum i40e_ethtool_test_id {
204 	I40E_ETH_TEST_REG = 0,
205 	I40E_ETH_TEST_EEPROM,
206 	I40E_ETH_TEST_INTR,
207 	I40E_ETH_TEST_LOOPBACK,
208 	I40E_ETH_TEST_LINK,
209 };
210 
211 static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
212 	"Register test  (offline)",
213 	"Eeprom test    (offline)",
214 	"Interrupt test (offline)",
215 	"Loopback test  (offline)",
216 	"Link test   (on/offline)"
217 };
218 
219 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
220 
221 /**
222  * i40e_partition_setting_complaint - generic complaint for MFP restriction
223  * @pf: the PF struct
224  **/
225 static void i40e_partition_setting_complaint(struct i40e_pf *pf)
226 {
227 	dev_info(&pf->pdev->dev,
228 		 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
229 }
230 
231 /**
232  * i40e_get_settings - Get Link Speed and Duplex settings
233  * @netdev: network interface device structure
234  * @ecmd: ethtool command
235  *
236  * Reports speed/duplex settings based on media_type
237  **/
238 static int i40e_get_settings(struct net_device *netdev,
239 			     struct ethtool_cmd *ecmd)
240 {
241 	struct i40e_netdev_priv *np = netdev_priv(netdev);
242 	struct i40e_pf *pf = np->vsi->back;
243 	struct i40e_hw *hw = &pf->hw;
244 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
245 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
246 	u32 link_speed = hw_link_info->link_speed;
247 
248 	/* hardware is either in 40G mode or 10G mode
249 	 * NOTE: this section initializes supported and advertising
250 	 */
251 	if (!link_up) {
		/* link is down, so fall back on the device ID to decide
		 * what kinds of info to display; this is mostly a guess
		 * that may change once link comes up
		 */
256 		switch (hw->device_id) {
257 		case I40E_DEV_ID_QSFP_A:
258 		case I40E_DEV_ID_QSFP_B:
259 		case I40E_DEV_ID_QSFP_C:
260 			/* pluggable QSFP */
261 			ecmd->supported = SUPPORTED_40000baseSR4_Full |
262 					  SUPPORTED_40000baseCR4_Full |
263 					  SUPPORTED_40000baseLR4_Full;
264 			ecmd->advertising = ADVERTISED_40000baseSR4_Full |
265 					    ADVERTISED_40000baseCR4_Full |
266 					    ADVERTISED_40000baseLR4_Full;
267 			break;
268 		case I40E_DEV_ID_KX_B:
269 			/* backplane 40G */
270 			ecmd->supported = SUPPORTED_40000baseKR4_Full;
271 			ecmd->advertising = ADVERTISED_40000baseKR4_Full;
272 			break;
273 		case I40E_DEV_ID_KX_C:
274 			/* backplane 10G */
275 			ecmd->supported = SUPPORTED_10000baseKR_Full;
276 			ecmd->advertising = ADVERTISED_10000baseKR_Full;
277 			break;
278 		case I40E_DEV_ID_10G_BASE_T:
279 			ecmd->supported = SUPPORTED_10000baseT_Full |
280 					  SUPPORTED_1000baseT_Full |
281 					  SUPPORTED_100baseT_Full;
282 			ecmd->advertising = ADVERTISED_10000baseT_Full |
283 					    ADVERTISED_1000baseT_Full |
284 					    ADVERTISED_100baseT_Full;
285 			break;
286 		default:
287 			/* all the rest are 10G/1G */
288 			ecmd->supported = SUPPORTED_10000baseT_Full |
289 					  SUPPORTED_1000baseT_Full;
290 			ecmd->advertising = ADVERTISED_10000baseT_Full |
291 					    ADVERTISED_1000baseT_Full;
292 			break;
293 		}
294 
295 		/* skip phy_type use as it is zero when link is down */
296 		goto no_valid_phy_type;
297 	}
298 
299 	switch (hw_link_info->phy_type) {
300 	case I40E_PHY_TYPE_40GBASE_CR4:
301 	case I40E_PHY_TYPE_40GBASE_CR4_CU:
302 		ecmd->supported = SUPPORTED_Autoneg |
303 				  SUPPORTED_40000baseCR4_Full;
304 		ecmd->advertising = ADVERTISED_Autoneg |
305 				    ADVERTISED_40000baseCR4_Full;
306 		break;
307 	case I40E_PHY_TYPE_40GBASE_KR4:
308 		ecmd->supported = SUPPORTED_Autoneg |
309 				  SUPPORTED_40000baseKR4_Full;
310 		ecmd->advertising = ADVERTISED_Autoneg |
311 				    ADVERTISED_40000baseKR4_Full;
312 		break;
313 	case I40E_PHY_TYPE_40GBASE_SR4:
314 	case I40E_PHY_TYPE_XLPPI:
315 	case I40E_PHY_TYPE_XLAUI:
316 		ecmd->supported = SUPPORTED_40000baseSR4_Full;
317 		break;
318 	case I40E_PHY_TYPE_40GBASE_LR4:
319 		ecmd->supported = SUPPORTED_40000baseLR4_Full;
320 		break;
321 	case I40E_PHY_TYPE_10GBASE_KX4:
322 		ecmd->supported = SUPPORTED_Autoneg |
323 				  SUPPORTED_10000baseKX4_Full;
324 		ecmd->advertising = ADVERTISED_Autoneg |
325 				    ADVERTISED_10000baseKX4_Full;
326 		break;
327 	case I40E_PHY_TYPE_10GBASE_KR:
328 		ecmd->supported = SUPPORTED_Autoneg |
329 				  SUPPORTED_10000baseKR_Full;
330 		ecmd->advertising = ADVERTISED_Autoneg |
331 				    ADVERTISED_10000baseKR_Full;
332 		break;
333 	case I40E_PHY_TYPE_10GBASE_SR:
334 	case I40E_PHY_TYPE_10GBASE_LR:
335 	case I40E_PHY_TYPE_1000BASE_SX:
336 	case I40E_PHY_TYPE_1000BASE_LX:
337 		ecmd->supported = SUPPORTED_10000baseT_Full;
338 		ecmd->supported |= SUPPORTED_1000baseT_Full;
339 		break;
340 	case I40E_PHY_TYPE_10GBASE_CR1_CU:
341 	case I40E_PHY_TYPE_10GBASE_CR1:
342 	case I40E_PHY_TYPE_10GBASE_T:
343 		ecmd->supported = SUPPORTED_Autoneg |
344 				  SUPPORTED_10000baseT_Full |
345 				  SUPPORTED_1000baseT_Full |
346 				  SUPPORTED_100baseT_Full;
347 		ecmd->advertising = ADVERTISED_Autoneg |
348 				    ADVERTISED_10000baseT_Full |
349 				    ADVERTISED_1000baseT_Full |
350 				    ADVERTISED_100baseT_Full;
351 		break;
352 	case I40E_PHY_TYPE_XAUI:
353 	case I40E_PHY_TYPE_XFI:
354 	case I40E_PHY_TYPE_SFI:
355 	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
356 		ecmd->supported = SUPPORTED_10000baseT_Full;
357 		break;
358 	case I40E_PHY_TYPE_1000BASE_KX:
359 	case I40E_PHY_TYPE_1000BASE_T:
360 		ecmd->supported = SUPPORTED_Autoneg |
361 				  SUPPORTED_10000baseT_Full |
362 				  SUPPORTED_1000baseT_Full |
363 				  SUPPORTED_100baseT_Full;
364 		ecmd->advertising = ADVERTISED_Autoneg |
365 				    ADVERTISED_10000baseT_Full |
366 				    ADVERTISED_1000baseT_Full |
367 				    ADVERTISED_100baseT_Full;
368 		break;
369 	case I40E_PHY_TYPE_100BASE_TX:
370 		ecmd->supported = SUPPORTED_Autoneg |
371 				  SUPPORTED_10000baseT_Full |
372 				  SUPPORTED_1000baseT_Full |
373 				  SUPPORTED_100baseT_Full;
374 		ecmd->advertising = ADVERTISED_Autoneg |
375 				    ADVERTISED_10000baseT_Full |
376 				    ADVERTISED_1000baseT_Full |
377 				    ADVERTISED_100baseT_Full;
378 		break;
379 	case I40E_PHY_TYPE_SGMII:
380 		ecmd->supported = SUPPORTED_Autoneg |
381 				  SUPPORTED_1000baseT_Full |
382 				  SUPPORTED_100baseT_Full;
383 		ecmd->advertising = ADVERTISED_Autoneg |
384 				    ADVERTISED_1000baseT_Full |
385 				    ADVERTISED_100baseT_Full;
386 		break;
387 	default:
388 		/* if we got here and link is up something bad is afoot */
389 		netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
390 			    hw_link_info->phy_type);
391 	}
392 
393 no_valid_phy_type:
	/* report whether autoneg is enabled or disabled */
395 	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
396 			  AUTONEG_ENABLE : AUTONEG_DISABLE);
397 
398 	switch (hw->phy.media_type) {
399 	case I40E_MEDIA_TYPE_BACKPLANE:
400 		ecmd->supported |= SUPPORTED_Autoneg |
401 				   SUPPORTED_Backplane;
402 		ecmd->advertising |= ADVERTISED_Autoneg |
403 				     ADVERTISED_Backplane;
404 		ecmd->port = PORT_NONE;
405 		break;
406 	case I40E_MEDIA_TYPE_BASET:
407 		ecmd->supported |= SUPPORTED_TP;
408 		ecmd->advertising |= ADVERTISED_TP;
409 		ecmd->port = PORT_TP;
410 		break;
411 	case I40E_MEDIA_TYPE_DA:
412 	case I40E_MEDIA_TYPE_CX4:
413 		ecmd->supported |= SUPPORTED_FIBRE;
414 		ecmd->advertising |= ADVERTISED_FIBRE;
415 		ecmd->port = PORT_DA;
416 		break;
417 	case I40E_MEDIA_TYPE_FIBER:
418 		ecmd->supported |= SUPPORTED_FIBRE;
419 		ecmd->port = PORT_FIBRE;
420 		break;
421 	case I40E_MEDIA_TYPE_UNKNOWN:
422 	default:
423 		ecmd->port = PORT_OTHER;
424 		break;
425 	}
426 
427 	ecmd->transceiver = XCVR_EXTERNAL;
428 
429 	ecmd->supported |= SUPPORTED_Pause;
430 
431 	switch (hw->fc.current_mode) {
432 	case I40E_FC_FULL:
433 		ecmd->advertising |= ADVERTISED_Pause;
434 		break;
435 	case I40E_FC_TX_PAUSE:
436 		ecmd->advertising |= ADVERTISED_Asym_Pause;
437 		break;
438 	case I40E_FC_RX_PAUSE:
439 		ecmd->advertising |= (ADVERTISED_Pause |
440 				      ADVERTISED_Asym_Pause);
441 		break;
442 	default:
443 		ecmd->advertising &= ~(ADVERTISED_Pause |
444 				       ADVERTISED_Asym_Pause);
445 		break;
446 	}
447 
448 	if (link_up) {
449 		switch (link_speed) {
450 		case I40E_LINK_SPEED_40GB:
451 			/* need a SPEED_40000 in ethtool.h */
452 			ethtool_cmd_speed_set(ecmd, 40000);
453 			break;
454 		case I40E_LINK_SPEED_10GB:
455 			ethtool_cmd_speed_set(ecmd, SPEED_10000);
456 			break;
457 		case I40E_LINK_SPEED_1GB:
458 			ethtool_cmd_speed_set(ecmd, SPEED_1000);
459 			break;
460 		case I40E_LINK_SPEED_100MB:
461 			ethtool_cmd_speed_set(ecmd, SPEED_100);
462 			break;
463 		default:
464 			break;
465 		}
466 		ecmd->duplex = DUPLEX_FULL;
467 	} else {
468 		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
469 		ecmd->duplex = DUPLEX_UNKNOWN;
470 	}
471 
472 	return 0;
473 }
474 
475 /**
476  * i40e_set_settings - Set Speed and Duplex
477  * @netdev: network interface device structure
478  * @ecmd: ethtool command
479  *
480  * Set speed/duplex per media_types advertised/forced
481  **/
482 static int i40e_set_settings(struct net_device *netdev,
483 			     struct ethtool_cmd *ecmd)
484 {
485 	struct i40e_netdev_priv *np = netdev_priv(netdev);
486 	struct i40e_aq_get_phy_abilities_resp abilities;
487 	struct i40e_aq_set_phy_config config;
488 	struct i40e_pf *pf = np->vsi->back;
489 	struct i40e_vsi *vsi = np->vsi;
490 	struct i40e_hw *hw = &pf->hw;
491 	struct ethtool_cmd safe_ecmd;
492 	i40e_status status = 0;
493 	bool change = false;
494 	int err = 0;
495 	u8 autoneg;
496 	u32 advertise;
497 
498 	/* Changing port settings is not supported if this isn't the
499 	 * port's controlling PF
500 	 */
501 	if (hw->partition_id != 1) {
502 		i40e_partition_setting_complaint(pf);
503 		return -EOPNOTSUPP;
504 	}
505 
506 	if (vsi != pf->vsi[pf->lan_vsi])
507 		return -EOPNOTSUPP;
508 
509 	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
510 	    hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
511 	    hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
512 	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
513 		return -EOPNOTSUPP;
514 
515 	/* get our own copy of the bits to check against */
516 	memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
517 	i40e_get_settings(netdev, &safe_ecmd);
518 
519 	/* save autoneg and speed out of ecmd */
520 	autoneg = ecmd->autoneg;
521 	advertise = ecmd->advertising;
522 
523 	/* set autoneg and speed back to what they currently are */
524 	ecmd->autoneg = safe_ecmd.autoneg;
525 	ecmd->advertising = safe_ecmd.advertising;
526 
527 	ecmd->cmd = safe_ecmd.cmd;
528 	/* If ecmd and safe_ecmd are not the same now, then they are
529 	 * trying to set something that we do not support
530 	 */
531 	if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
532 		return -EOPNOTSUPP;
533 
534 	while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
535 		usleep_range(1000, 2000);
536 
537 	/* Get the current phy config */
538 	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
539 					      NULL);
540 	if (status)
541 		return -EAGAIN;
542 
543 	/* Copy abilities to config in case autoneg is not
544 	 * set below
545 	 */
546 	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
547 	config.abilities = abilities.abilities;
548 
549 	/* Check autoneg */
550 	if (autoneg == AUTONEG_ENABLE) {
551 		/* If autoneg is not supported, return error */
552 		if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
553 			netdev_info(netdev, "Autoneg not supported on this phy\n");
554 			return -EINVAL;
555 		}
556 		/* If autoneg was not already enabled */
557 		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
558 			config.abilities = abilities.abilities |
559 					   I40E_AQ_PHY_ENABLE_AN;
560 			change = true;
561 		}
562 	} else {
		/* If autoneg is supported, 10GBASE_T is the only PHY that
		 * allows it to be disabled, so return an error for any
		 * other PHY type
		 */
566 		if (safe_ecmd.supported & SUPPORTED_Autoneg &&
567 		    hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
568 			netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
569 			return -EINVAL;
570 		}
571 		/* If autoneg is currently enabled */
572 		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
573 			config.abilities = abilities.abilities &
574 					   ~I40E_AQ_PHY_ENABLE_AN;
575 			change = true;
576 		}
577 	}
578 
579 	if (advertise & ~safe_ecmd.supported)
580 		return -EINVAL;
581 
582 	if (advertise & ADVERTISED_100baseT_Full)
583 		config.link_speed |= I40E_LINK_SPEED_100MB;
584 	if (advertise & ADVERTISED_1000baseT_Full ||
585 	    advertise & ADVERTISED_1000baseKX_Full)
586 		config.link_speed |= I40E_LINK_SPEED_1GB;
587 	if (advertise & ADVERTISED_10000baseT_Full ||
588 	    advertise & ADVERTISED_10000baseKX4_Full ||
589 	    advertise & ADVERTISED_10000baseKR_Full)
590 		config.link_speed |= I40E_LINK_SPEED_10GB;
591 	if (advertise & ADVERTISED_40000baseKR4_Full ||
592 	    advertise & ADVERTISED_40000baseCR4_Full ||
593 	    advertise & ADVERTISED_40000baseSR4_Full ||
594 	    advertise & ADVERTISED_40000baseLR4_Full)
595 		config.link_speed |= I40E_LINK_SPEED_40GB;
596 
597 	if (change || (abilities.link_speed != config.link_speed)) {
598 		/* copy over the rest of the abilities */
599 		config.phy_type = abilities.phy_type;
600 		config.eee_capability = abilities.eee_capability;
601 		config.eeer = abilities.eeer_val;
602 		config.low_power_ctrl = abilities.d3_lpan;
603 
604 		/* set link and auto negotiation so changes take effect */
605 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
606 		/* If link is up put link down */
607 		if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
			/* Tell the OS the link is going down; it will come
			 * back up asynchronously when the firmware reports
			 * it is ready
			 */
611 			netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
612 			netif_carrier_off(netdev);
613 			netif_tx_stop_all_queues(netdev);
614 		}
615 
616 		/* make the aq call */
617 		status = i40e_aq_set_phy_config(hw, &config, NULL);
618 		if (status) {
619 			netdev_info(netdev, "Set phy config failed with error %d.\n",
620 				    status);
621 			return -EAGAIN;
622 		}
623 
624 		status = i40e_update_link_info(hw, true);
625 		if (status)
626 			netdev_info(netdev, "Updating link info failed with error %d\n",
627 				    status);
628 
629 	} else {
630 		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
631 	}
632 
633 	return err;
634 }
635 
636 static int i40e_nway_reset(struct net_device *netdev)
637 {
638 	/* restart autonegotiation */
639 	struct i40e_netdev_priv *np = netdev_priv(netdev);
640 	struct i40e_pf *pf = np->vsi->back;
641 	struct i40e_hw *hw = &pf->hw;
642 	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
643 	i40e_status ret = 0;
644 
645 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
646 	if (ret) {
647 		netdev_info(netdev, "link restart failed, aq_err=%d\n",
648 			    pf->hw.aq.asq_last_status);
649 		return -EIO;
650 	}
651 
652 	return 0;
653 }
654 
655 /**
 * i40e_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethtool pause parameters in which Tx/Rx pause status is reported
658  **/
659 static void i40e_get_pauseparam(struct net_device *netdev,
660 				struct ethtool_pauseparam *pause)
661 {
662 	struct i40e_netdev_priv *np = netdev_priv(netdev);
663 	struct i40e_pf *pf = np->vsi->back;
664 	struct i40e_hw *hw = &pf->hw;
665 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
666 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
667 
668 	pause->autoneg =
669 		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
670 		  AUTONEG_ENABLE : AUTONEG_DISABLE);
671 
	/* PFC is enabled, so report LFC as off */
673 	if (dcbx_cfg->pfc.pfcenable) {
674 		pause->rx_pause = 0;
675 		pause->tx_pause = 0;
676 		return;
677 	}
678 
679 	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
680 		pause->rx_pause = 1;
681 	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
682 		pause->tx_pause = 1;
683 	} else if (hw->fc.current_mode == I40E_FC_FULL) {
684 		pause->rx_pause = 1;
685 		pause->tx_pause = 1;
686 	}
687 }
688 
689 /**
690  * i40e_set_pauseparam - Set Flow Control parameter
691  * @netdev: network interface device structure
 * @pause: requested Tx/Rx flow control parameters
693  **/
694 static int i40e_set_pauseparam(struct net_device *netdev,
695 			       struct ethtool_pauseparam *pause)
696 {
697 	struct i40e_netdev_priv *np = netdev_priv(netdev);
698 	struct i40e_pf *pf = np->vsi->back;
699 	struct i40e_vsi *vsi = np->vsi;
700 	struct i40e_hw *hw = &pf->hw;
701 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
702 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
703 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
704 	i40e_status status;
705 	u8 aq_failures;
706 	int err = 0;
707 
708 	/* Changing the port's flow control is not supported if this isn't the
709 	 * port's controlling PF
710 	 */
711 	if (hw->partition_id != 1) {
712 		i40e_partition_setting_complaint(pf);
713 		return -EOPNOTSUPP;
714 	}
715 
716 	if (vsi != pf->vsi[pf->lan_vsi])
717 		return -EOPNOTSUPP;
718 
719 	if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
720 	    AUTONEG_ENABLE : AUTONEG_DISABLE)) {
721 		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
722 		return -EOPNOTSUPP;
723 	}
724 
	/* If the device is up but autoneg has not completed */
726 	if (!test_bit(__I40E_DOWN, &pf->state) &&
727 	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
		/* Warn that the change might not necessarily take effect */
729 		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
730 	}
731 
732 	if (dcbx_cfg->pfc.pfcenable) {
733 		netdev_info(netdev,
734 			    "Priority flow control enabled. Cannot set link flow control.\n");
735 		return -EOPNOTSUPP;
736 	}
737 
738 	if (pause->rx_pause && pause->tx_pause)
739 		hw->fc.requested_mode = I40E_FC_FULL;
740 	else if (pause->rx_pause && !pause->tx_pause)
741 		hw->fc.requested_mode = I40E_FC_RX_PAUSE;
742 	else if (!pause->rx_pause && pause->tx_pause)
743 		hw->fc.requested_mode = I40E_FC_TX_PAUSE;
744 	else if (!pause->rx_pause && !pause->tx_pause)
745 		hw->fc.requested_mode = I40E_FC_NONE;
746 	else
		return -EINVAL;
748 
	/* Tell the OS the link is going down; it will come back up
	 * asynchronously when the firmware reports it is ready
	 */
752 	netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
753 	netif_carrier_off(netdev);
754 	netif_tx_stop_all_queues(netdev);
755 
	/* Set the FC mode and only restart autoneg if link is up */
757 	status = i40e_set_fc(hw, &aq_failures, link_up);
758 
759 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
760 		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
761 			    status, hw->aq.asq_last_status);
762 		err = -EAGAIN;
763 	}
764 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
765 		netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
766 			    status, hw->aq.asq_last_status);
767 		err = -EAGAIN;
768 	}
769 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
770 		netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n",
771 			    status, hw->aq.asq_last_status);
772 		err = -EAGAIN;
773 	}
774 
775 	if (!test_bit(__I40E_DOWN, &pf->state)) {
776 		/* Give it a little more time to try to come back */
777 		msleep(75);
778 		if (!test_bit(__I40E_DOWN, &pf->state))
779 			return i40e_nway_reset(netdev);
780 	}
781 
782 	return err;
783 }
784 
785 static u32 i40e_get_msglevel(struct net_device *netdev)
786 {
787 	struct i40e_netdev_priv *np = netdev_priv(netdev);
788 	struct i40e_pf *pf = np->vsi->back;
789 
790 	return pf->msg_enable;
791 }
792 
793 static void i40e_set_msglevel(struct net_device *netdev, u32 data)
794 {
795 	struct i40e_netdev_priv *np = netdev_priv(netdev);
796 	struct i40e_pf *pf = np->vsi->back;
797 
798 	if (I40E_DEBUG_USER & data)
799 		pf->hw.debug_mask = data;
800 	pf->msg_enable = data;
801 }
802 
803 static int i40e_get_regs_len(struct net_device *netdev)
804 {
805 	int reg_count = 0;
806 	int i;
807 
808 	for (i = 0; i40e_reg_list[i].offset != 0; i++)
809 		reg_count += i40e_reg_list[i].elements;
810 
811 	return reg_count * sizeof(u32);
812 }
813 
814 static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
815 			  void *p)
816 {
817 	struct i40e_netdev_priv *np = netdev_priv(netdev);
818 	struct i40e_pf *pf = np->vsi->back;
819 	struct i40e_hw *hw = &pf->hw;
820 	u32 *reg_buf = p;
821 	int i, j, ri;
822 	u32 reg;
823 
824 	/* Tell ethtool which driver-version-specific regs output we have.
825 	 *
826 	 * At some point, if we have ethtool doing special formatting of
827 	 * this data, it will rely on this version number to know how to
828 	 * interpret things.  Hence, this needs to be updated if/when the
829 	 * diags register table is changed.
830 	 */
831 	regs->version = 1;
832 
833 	/* loop through the diags reg table for what to print */
834 	ri = 0;
835 	for (i = 0; i40e_reg_list[i].offset != 0; i++) {
836 		for (j = 0; j < i40e_reg_list[i].elements; j++) {
837 			reg = i40e_reg_list[i].offset
838 				+ (j * i40e_reg_list[i].stride);
839 			reg_buf[ri++] = rd32(hw, reg);
840 		}
	}
}
844 
845 static int i40e_get_eeprom(struct net_device *netdev,
846 			   struct ethtool_eeprom *eeprom, u8 *bytes)
847 {
848 	struct i40e_netdev_priv *np = netdev_priv(netdev);
849 	struct i40e_hw *hw = &np->vsi->back->hw;
850 	struct i40e_pf *pf = np->vsi->back;
851 	int ret_val = 0, len, offset;
852 	u8 *eeprom_buff;
853 	u16 i, sectors;
854 	bool last;
855 	u32 magic;
856 
857 #define I40E_NVM_SECTOR_SIZE  4096
858 	if (eeprom->len == 0)
859 		return -EINVAL;
860 
861 	/* check for NVMUpdate access method */
862 	magic = hw->vendor_id | (hw->device_id << 16);
863 	if (eeprom->magic && eeprom->magic != magic) {
864 		struct i40e_nvm_access *cmd;
865 		int errno;
866 
867 		/* make sure it is the right magic for NVMUpdate */
868 		if ((eeprom->magic >> 16) != hw->device_id)
869 			return -EINVAL;
870 
871 		cmd = (struct i40e_nvm_access *)eeprom;
872 		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
873 		if (ret_val)
874 			dev_info(&pf->pdev->dev,
875 				 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
876 				 ret_val, hw->aq.asq_last_status, errno,
877 				 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
878 				 cmd->offset, cmd->data_size);
879 
880 		return errno;
881 	}
882 
883 	/* normal ethtool get_eeprom support */
884 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
885 
886 	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
887 	if (!eeprom_buff)
888 		return -ENOMEM;
889 
890 	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
891 	if (ret_val) {
892 		dev_info(&pf->pdev->dev,
893 			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
894 			 ret_val, hw->aq.asq_last_status);
895 		goto free_buff;
896 	}
897 
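	/* read the NVM in I40E_NVM_SECTOR_SIZE chunks, marking the final
	 * chunk as the last command of the read sequence
	 */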
898 	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
899 	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
900 	len = I40E_NVM_SECTOR_SIZE;
901 	last = false;
902 	for (i = 0; i < sectors; i++) {
903 		if (i == (sectors - 1)) {
904 			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
905 			last = true;
906 		}
		offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
908 		ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
909 				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
910 				last, NULL);
911 		if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
912 			dev_info(&pf->pdev->dev,
913 				 "read NVM failed, invalid offset 0x%x\n",
914 				 offset);
915 			break;
916 		} else if (ret_val &&
917 			   hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
918 			dev_info(&pf->pdev->dev,
919 				 "read NVM failed, access, offset 0x%x\n",
920 				 offset);
921 			break;
922 		} else if (ret_val) {
923 			dev_info(&pf->pdev->dev,
924 				 "read NVM failed offset %d err=%d status=0x%x\n",
925 				 offset, ret_val, hw->aq.asq_last_status);
926 			break;
927 		}
928 	}
929 
930 	i40e_release_nvm(hw);
931 	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
932 free_buff:
933 	kfree(eeprom_buff);
934 	return ret_val;
935 }
936 
937 static int i40e_get_eeprom_len(struct net_device *netdev)
938 {
939 	struct i40e_netdev_priv *np = netdev_priv(netdev);
940 	struct i40e_hw *hw = &np->vsi->back->hw;
941 	u32 val;
942 
943 	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
944 		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
945 		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
	/* the register reports the flash size as a power of 2 multiple of 64KB */
947 	val = (64 * 1024) * (1 << val);
948 	return val;
949 }
950 
951 static int i40e_set_eeprom(struct net_device *netdev,
952 			   struct ethtool_eeprom *eeprom, u8 *bytes)
953 {
954 	struct i40e_netdev_priv *np = netdev_priv(netdev);
955 	struct i40e_hw *hw = &np->vsi->back->hw;
956 	struct i40e_pf *pf = np->vsi->back;
957 	struct i40e_nvm_access *cmd;
958 	int ret_val = 0;
959 	int errno;
960 	u32 magic;
961 
962 	/* normal ethtool set_eeprom is not supported */
963 	magic = hw->vendor_id | (hw->device_id << 16);
964 	if (eeprom->magic == magic)
965 		return -EOPNOTSUPP;
966 
967 	/* check for NVMUpdate access method */
968 	if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
969 		return -EINVAL;
970 
971 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
972 	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
973 		return -EBUSY;
974 
975 	cmd = (struct i40e_nvm_access *)eeprom;
976 	ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
977 	if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY)
978 		dev_info(&pf->pdev->dev,
979 			 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
980 			 ret_val, hw->aq.asq_last_status, errno,
981 			 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
982 			 cmd->offset, cmd->data_size);
983 
984 	return errno;
985 }
986 
987 static void i40e_get_drvinfo(struct net_device *netdev,
988 			     struct ethtool_drvinfo *drvinfo)
989 {
990 	struct i40e_netdev_priv *np = netdev_priv(netdev);
991 	struct i40e_vsi *vsi = np->vsi;
992 	struct i40e_pf *pf = vsi->back;
993 
994 	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
995 	strlcpy(drvinfo->version, i40e_driver_version_str,
996 		sizeof(drvinfo->version));
997 	strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
998 		sizeof(drvinfo->fw_version));
999 	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
1000 		sizeof(drvinfo->bus_info));
1001 }
1002 
1003 static void i40e_get_ringparam(struct net_device *netdev,
1004 			       struct ethtool_ringparam *ring)
1005 {
1006 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1007 	struct i40e_pf *pf = np->vsi->back;
1008 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
1009 
1010 	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1011 	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1012 	ring->rx_mini_max_pending = 0;
1013 	ring->rx_jumbo_max_pending = 0;
1014 	ring->rx_pending = vsi->rx_rings[0]->count;
1015 	ring->tx_pending = vsi->tx_rings[0]->count;
1016 	ring->rx_mini_pending = 0;
1017 	ring->rx_jumbo_pending = 0;
1018 }
1019 
1020 static int i40e_set_ringparam(struct net_device *netdev,
1021 			      struct ethtool_ringparam *ring)
1022 {
1023 	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
1024 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1025 	struct i40e_vsi *vsi = np->vsi;
1026 	struct i40e_pf *pf = vsi->back;
1027 	u32 new_rx_count, new_tx_count;
1028 	int i, err = 0;
1029 
1030 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1031 		return -EINVAL;
1032 
1033 	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1034 	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
1035 	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1036 	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
1037 		netdev_info(netdev,
1038 			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
1039 			    ring->tx_pending, ring->rx_pending,
1040 			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
1041 		return -EINVAL;
1042 	}
1043 
1044 	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1045 	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1046 
1047 	/* if nothing to do return success */
1048 	if ((new_tx_count == vsi->tx_rings[0]->count) &&
1049 	    (new_rx_count == vsi->rx_rings[0]->count))
1050 		return 0;
1051 
1052 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
1053 		usleep_range(1000, 2000);
1054 
1055 	if (!netif_running(vsi->netdev)) {
1056 		/* simple case - set for the next time the netdev is started */
1057 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1058 			vsi->tx_rings[i]->count = new_tx_count;
1059 			vsi->rx_rings[i]->count = new_rx_count;
1060 		}
1061 		goto done;
1062 	}
1063 
1064 	/* We can't just free everything and then setup again,
1065 	 * because the ISRs in MSI-X mode get passed pointers
1066 	 * to the Tx and Rx ring structs.
1067 	 */
1068 
1069 	/* alloc updated Tx resources */
1070 	if (new_tx_count != vsi->tx_rings[0]->count) {
1071 		netdev_info(netdev,
1072 			    "Changing Tx descriptor count from %d to %d.\n",
1073 			    vsi->tx_rings[0]->count, new_tx_count);
1074 		tx_rings = kcalloc(vsi->alloc_queue_pairs,
1075 				   sizeof(struct i40e_ring), GFP_KERNEL);
1076 		if (!tx_rings) {
1077 			err = -ENOMEM;
1078 			goto done;
1079 		}
1080 
1081 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1082 			/* clone ring and setup updated count */
1083 			tx_rings[i] = *vsi->tx_rings[i];
1084 			tx_rings[i].count = new_tx_count;
1085 			err = i40e_setup_tx_descriptors(&tx_rings[i]);
1086 			if (err) {
1087 				while (i) {
1088 					i--;
1089 					i40e_free_tx_resources(&tx_rings[i]);
1090 				}
1091 				kfree(tx_rings);
1092 				tx_rings = NULL;
1093 
1094 				goto done;
1095 			}
1096 		}
1097 	}
1098 
1099 	/* alloc updated Rx resources */
1100 	if (new_rx_count != vsi->rx_rings[0]->count) {
1101 		netdev_info(netdev,
1102 			    "Changing Rx descriptor count from %d to %d\n",
1103 			    vsi->rx_rings[0]->count, new_rx_count);
1104 		rx_rings = kcalloc(vsi->alloc_queue_pairs,
1105 				   sizeof(struct i40e_ring), GFP_KERNEL);
1106 		if (!rx_rings) {
1107 			err = -ENOMEM;
1108 			goto free_tx;
1109 		}
1110 
1111 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1112 			/* clone ring and setup updated count */
1113 			rx_rings[i] = *vsi->rx_rings[i];
1114 			rx_rings[i].count = new_rx_count;
1115 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
1116 			if (err) {
1117 				while (i) {
1118 					i--;
1119 					i40e_free_rx_resources(&rx_rings[i]);
1120 				}
1121 				kfree(rx_rings);
1122 				rx_rings = NULL;
1123 
1124 				goto free_tx;
1125 			}
1126 		}
1127 	}
1128 
1129 	/* Bring interface down, copy in the new ring info,
1130 	 * then restore the interface
1131 	 */
1132 	i40e_down(vsi);
1133 
1134 	if (tx_rings) {
1135 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1136 			i40e_free_tx_resources(vsi->tx_rings[i]);
1137 			*vsi->tx_rings[i] = tx_rings[i];
1138 		}
1139 		kfree(tx_rings);
1140 		tx_rings = NULL;
1141 	}
1142 
1143 	if (rx_rings) {
1144 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1145 			i40e_free_rx_resources(vsi->rx_rings[i]);
1146 			*vsi->rx_rings[i] = rx_rings[i];
1147 		}
1148 		kfree(rx_rings);
1149 		rx_rings = NULL;
1150 	}
1151 
1152 	i40e_up(vsi);
1153 
1154 free_tx:
1155 	/* error cleanup if the Rx allocations failed after getting Tx */
1156 	if (tx_rings) {
1157 		for (i = 0; i < vsi->num_queue_pairs; i++)
1158 			i40e_free_tx_resources(&tx_rings[i]);
1159 		kfree(tx_rings);
1160 		tx_rings = NULL;
1161 	}
1162 
1163 done:
1164 	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
1165 
1166 	return err;
1167 }
1168 
1169 static int i40e_get_sset_count(struct net_device *netdev, int sset)
1170 {
1171 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1172 	struct i40e_vsi *vsi = np->vsi;
1173 	struct i40e_pf *pf = vsi->back;
1174 
1175 	switch (sset) {
1176 	case ETH_SS_TEST:
1177 		return I40E_TEST_LEN;
1178 	case ETH_SS_STATS:
1179 		if (vsi == pf->vsi[pf->lan_vsi]) {
1180 			int len = I40E_PF_STATS_LEN(netdev);
1181 
1182 			if (pf->lan_veb != I40E_NO_VEB)
1183 				len += I40E_VEB_STATS_LEN;
1184 			return len;
1185 		} else {
1186 			return I40E_VSI_STATS_LEN(netdev);
1187 		}
1188 	default:
1189 		return -EOPNOTSUPP;
1190 	}
1191 }
1192 
1193 static void i40e_get_ethtool_stats(struct net_device *netdev,
1194 				   struct ethtool_stats *stats, u64 *data)
1195 {
1196 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1197 	struct i40e_ring *tx_ring, *rx_ring;
1198 	struct i40e_vsi *vsi = np->vsi;
1199 	struct i40e_pf *pf = vsi->back;
1200 	int i = 0;
1201 	char *p;
1202 	int j;
1203 	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
1204 	unsigned int start;
1205 
1206 	i40e_update_stats(vsi);
1207 
1208 	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
1209 		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
1210 		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
1211 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1212 	}
1213 	for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
1214 		p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
1215 		data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
1216 			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1217 	}
1218 #ifdef I40E_FCOE
1219 	for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
1220 		p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
1221 		data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
1222 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1223 	}
1224 #endif
1225 	rcu_read_lock();
1226 	for (j = 0; j < vsi->num_queue_pairs; j++) {
1227 		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
1228 
1229 		if (!tx_ring)
1230 			continue;
1231 
1232 		/* process Tx ring statistics */
1233 		do {
1234 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
1235 			data[i] = tx_ring->stats.packets;
1236 			data[i + 1] = tx_ring->stats.bytes;
1237 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
1238 		i += 2;
1239 
1240 		/* Rx ring is the 2nd half of the queue pair */
1241 		rx_ring = &tx_ring[1];
1242 		do {
1243 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
1244 			data[i] = rx_ring->stats.packets;
1245 			data[i + 1] = rx_ring->stats.bytes;
1246 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
1247 		i += 2;
1248 	}
1249 	rcu_read_unlock();
1250 	if (vsi != pf->vsi[pf->lan_vsi])
1251 		return;
1252 
1253 	if (pf->lan_veb != I40E_NO_VEB) {
1254 		struct i40e_veb *veb = pf->veb[pf->lan_veb];
1255 		for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
1256 			p = (char *)veb;
1257 			p += i40e_gstrings_veb_stats[j].stat_offset;
1258 			data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
1259 				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1260 		}
1261 	}
1262 	for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
1263 		p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
1264 		data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
1265 			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1266 	}
1267 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
1268 		data[i++] = pf->stats.priority_xon_tx[j];
1269 		data[i++] = pf->stats.priority_xoff_tx[j];
1270 	}
1271 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
1272 		data[i++] = pf->stats.priority_xon_rx[j];
1273 		data[i++] = pf->stats.priority_xoff_rx[j];
1274 	}
1275 	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
1276 		data[i++] = pf->stats.priority_xon_2_xoff[j];
1277 }
1278 
1279 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
1280 			     u8 *data)
1281 {
1282 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1283 	struct i40e_vsi *vsi = np->vsi;
1284 	struct i40e_pf *pf = vsi->back;
1285 	char *p = (char *)data;
1286 	int i;
1287 
1288 	switch (stringset) {
1289 	case ETH_SS_TEST:
1290 		for (i = 0; i < I40E_TEST_LEN; i++) {
1291 			memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
1292 			data += ETH_GSTRING_LEN;
1293 		}
1294 		break;
1295 	case ETH_SS_STATS:
1296 		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
1297 			snprintf(p, ETH_GSTRING_LEN, "%s",
1298 				 i40e_gstrings_net_stats[i].stat_string);
1299 			p += ETH_GSTRING_LEN;
1300 		}
1301 		for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
1302 			snprintf(p, ETH_GSTRING_LEN, "%s",
1303 				 i40e_gstrings_misc_stats[i].stat_string);
1304 			p += ETH_GSTRING_LEN;
1305 		}
1306 #ifdef I40E_FCOE
1307 		for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
1308 			snprintf(p, ETH_GSTRING_LEN, "%s",
1309 				 i40e_gstrings_fcoe_stats[i].stat_string);
1310 			p += ETH_GSTRING_LEN;
1311 		}
1312 #endif
1313 		for (i = 0; i < vsi->num_queue_pairs; i++) {
1314 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
1315 			p += ETH_GSTRING_LEN;
1316 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
1317 			p += ETH_GSTRING_LEN;
1318 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
1319 			p += ETH_GSTRING_LEN;
1320 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
1321 			p += ETH_GSTRING_LEN;
1322 		}
1323 		if (vsi != pf->vsi[pf->lan_vsi])
1324 			return;
1325 
1326 		if (pf->lan_veb != I40E_NO_VEB) {
1327 			for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
1328 				snprintf(p, ETH_GSTRING_LEN, "veb.%s",
1329 					i40e_gstrings_veb_stats[i].stat_string);
1330 				p += ETH_GSTRING_LEN;
1331 			}
1332 		}
1333 		for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
1334 			snprintf(p, ETH_GSTRING_LEN, "port.%s",
1335 				 i40e_gstrings_stats[i].stat_string);
1336 			p += ETH_GSTRING_LEN;
1337 		}
1338 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1339 			snprintf(p, ETH_GSTRING_LEN,
1340 				 "port.tx_priority_%u_xon", i);
1341 			p += ETH_GSTRING_LEN;
1342 			snprintf(p, ETH_GSTRING_LEN,
1343 				 "port.tx_priority_%u_xoff", i);
1344 			p += ETH_GSTRING_LEN;
1345 		}
1346 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1347 			snprintf(p, ETH_GSTRING_LEN,
1348 				 "port.rx_priority_%u_xon", i);
1349 			p += ETH_GSTRING_LEN;
1350 			snprintf(p, ETH_GSTRING_LEN,
1351 				 "port.rx_priority_%u_xoff", i);
1352 			p += ETH_GSTRING_LEN;
1353 		}
1354 		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
1355 			snprintf(p, ETH_GSTRING_LEN,
1356 				 "port.rx_priority_%u_xon_2_xoff", i);
1357 			p += ETH_GSTRING_LEN;
1358 		}
1359 		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
1360 		break;
1361 	}
1362 }
1363 
1364 static int i40e_get_ts_info(struct net_device *dev,
1365 			    struct ethtool_ts_info *info)
1366 {
1367 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
1368 
1369 	/* only report HW timestamping if PTP is enabled */
1370 	if (!(pf->flags & I40E_FLAG_PTP))
1371 		return ethtool_op_get_ts_info(dev, info);
1372 
1373 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1374 				SOF_TIMESTAMPING_RX_SOFTWARE |
1375 				SOF_TIMESTAMPING_SOFTWARE |
1376 				SOF_TIMESTAMPING_TX_HARDWARE |
1377 				SOF_TIMESTAMPING_RX_HARDWARE |
1378 				SOF_TIMESTAMPING_RAW_HARDWARE;
1379 
1380 	if (pf->ptp_clock)
1381 		info->phc_index = ptp_clock_index(pf->ptp_clock);
1382 	else
1383 		info->phc_index = -1;
1384 
1385 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1386 
1387 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1388 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1389 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1390 			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
1391 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1392 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1393 			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
1394 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1395 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1396 			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
1397 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1398 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1399 
1400 	return 0;
1401 }
1402 
1403 static int i40e_link_test(struct net_device *netdev, u64 *data)
1404 {
1405 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1406 	struct i40e_pf *pf = np->vsi->back;
1407 
1408 	netif_info(pf, hw, netdev, "link test\n");
1409 	if (i40e_get_link_status(&pf->hw))
1410 		*data = 0;
1411 	else
1412 		*data = 1;
1413 
1414 	return *data;
1415 }
1416 
1417 static int i40e_reg_test(struct net_device *netdev, u64 *data)
1418 {
1419 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1420 	struct i40e_pf *pf = np->vsi->back;
1421 
1422 	netif_info(pf, hw, netdev, "register test\n");
1423 	*data = i40e_diag_reg_test(&pf->hw);
1424 
1425 	return *data;
1426 }
1427 
1428 static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
1429 {
1430 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1431 	struct i40e_pf *pf = np->vsi->back;
1432 
1433 	netif_info(pf, hw, netdev, "eeprom test\n");
1434 	*data = i40e_diag_eeprom_test(&pf->hw);
1435 
	/* forcibly clear the NVM Update state machine */
1437 	pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;
1438 
1439 	return *data;
1440 }
1441 
1442 static int i40e_intr_test(struct net_device *netdev, u64 *data)
1443 {
1444 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1445 	struct i40e_pf *pf = np->vsi->back;
1446 	u16 swc_old = pf->sw_int_count;
1447 
1448 	netif_info(pf, hw, netdev, "interrupt test\n");
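	/* fire a software interrupt, then check that the SW interrupt
	 * counter advanced; *data is set non-zero (test failed) if it
	 * did not
	 */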
1449 	wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
1450 	     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
1451 	      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1452 	      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
1453 	      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
1454 	      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
1455 	usleep_range(1000, 2000);
1456 	*data = (swc_old == pf->sw_int_count);
1457 
1458 	return *data;
1459 }
1460 
1461 static int i40e_loopback_test(struct net_device *netdev, u64 *data)
1462 {
1463 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1464 	struct i40e_pf *pf = np->vsi->back;
1465 
1466 	netif_info(pf, hw, netdev, "loopback test not implemented\n");
1467 	*data = 0;
1468 
1469 	return *data;
1470 }
1471 
1472 static void i40e_diag_test(struct net_device *netdev,
1473 			   struct ethtool_test *eth_test, u64 *data)
1474 {
1475 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1476 	struct i40e_pf *pf = np->vsi->back;
1477 
1478 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1479 		/* Offline tests */
1480 		netif_info(pf, drv, netdev, "offline testing starting\n");
1481 
1482 		set_bit(__I40E_TESTING, &pf->state);
1483 
1484 		/* Link test performed before hardware reset
1485 		 * so autoneg doesn't interfere with test result
1486 		 */
1487 		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
1488 			eth_test->flags |= ETH_TEST_FL_FAILED;
1489 
1490 		if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
1491 			eth_test->flags |= ETH_TEST_FL_FAILED;
1492 
1493 		if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
1494 			eth_test->flags |= ETH_TEST_FL_FAILED;
1495 
1496 		if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
1497 			eth_test->flags |= ETH_TEST_FL_FAILED;
1498 
1499 		/* run reg test last, a reset is required after it */
1500 		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
1501 			eth_test->flags |= ETH_TEST_FL_FAILED;
1502 
1503 		clear_bit(__I40E_TESTING, &pf->state);
1504 		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
1505 	} else {
1506 		/* Online tests */
1507 		netif_info(pf, drv, netdev, "online testing starting\n");
1508 
1509 		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
1510 			eth_test->flags |= ETH_TEST_FL_FAILED;
1511 
		/* Offline-only tests are not run online; pass them by default */
1513 		data[I40E_ETH_TEST_REG] = 0;
1514 		data[I40E_ETH_TEST_EEPROM] = 0;
1515 		data[I40E_ETH_TEST_INTR] = 0;
1516 		data[I40E_ETH_TEST_LOOPBACK] = 0;
1517 	}
1518 
1519 	netif_info(pf, drv, netdev, "testing finished\n");
1520 }
1521 
1522 static void i40e_get_wol(struct net_device *netdev,
1523 			 struct ethtool_wolinfo *wol)
1524 {
1525 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1526 	struct i40e_pf *pf = np->vsi->back;
1527 	struct i40e_hw *hw = &pf->hw;
1528 	u16 wol_nvm_bits;
1529 
1530 	/* NVM bit on means WoL disabled for the port */
1531 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1532 	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
1533 		wol->supported = 0;
1534 		wol->wolopts = 0;
1535 	} else {
1536 		wol->supported = WAKE_MAGIC;
1537 		wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
1538 	}
1539 }
1540 
1541 /**
1542  * i40e_set_wol - set the WakeOnLAN configuration
1543  * @netdev: the netdev in question
1544  * @wol: the ethtool WoL setting data
1545  **/
1546 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1547 {
1548 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1549 	struct i40e_pf *pf = np->vsi->back;
1550 	struct i40e_vsi *vsi = np->vsi;
1551 	struct i40e_hw *hw = &pf->hw;
1552 	u16 wol_nvm_bits;
1553 
1554 	/* WoL not supported if this isn't the controlling PF on the port */
1555 	if (hw->partition_id != 1) {
1556 		i40e_partition_setting_complaint(pf);
1557 		return -EOPNOTSUPP;
1558 	}
1559 
1560 	if (vsi != pf->vsi[pf->lan_vsi])
1561 		return -EOPNOTSUPP;
1562 
1563 	/* NVM bit on means WoL disabled for the port */
1564 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1565 	if (((1 << hw->port) & wol_nvm_bits))
1566 		return -EOPNOTSUPP;
1567 
1568 	/* only magic packet is supported */
1569 	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
1570 		return -EOPNOTSUPP;
1571 
1572 	/* is this a new value? */
1573 	if (pf->wol_en != !!wol->wolopts) {
1574 		pf->wol_en = !!wol->wolopts;
1575 		device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
1576 	}
1577 
1578 	return 0;
1579 }
1580 
1581 static int i40e_set_phys_id(struct net_device *netdev,
1582 			    enum ethtool_phys_id_state state)
1583 {
1584 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1585 	struct i40e_pf *pf = np->vsi->back;
1586 	struct i40e_hw *hw = &pf->hw;
1587 	int blink_freq = 2;
1588 
1589 	switch (state) {
1590 	case ETHTOOL_ID_ACTIVE:
1591 		pf->led_status = i40e_led_get(hw);
1592 		return blink_freq;
1593 	case ETHTOOL_ID_ON:
1594 		i40e_led_set(hw, 0xF, false);
1595 		break;
1596 	case ETHTOOL_ID_OFF:
1597 		i40e_led_set(hw, 0x0, false);
1598 		break;
1599 	case ETHTOOL_ID_INACTIVE:
1600 		i40e_led_set(hw, pf->led_status, false);
1601 		break;
1602 	}
1603 
1604 	return 0;
1605 }
1606 
/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
 * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us, and also
 * 125us (8000 interrupts per second) == ITR(62)
1610  */
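/* For example, a requested rx-usecs of 20 corresponds to ITR(10) above
 * and is programmed through ITR_TO_REG() in i40e_set_coalesce().
 */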
1611 
static int i40e_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	ec->tx_max_coalesced_frames_irq = vsi->work_limit;
	ec->rx_max_coalesced_frames_irq = vsi->work_limit;

	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
		ec->use_adaptive_rx_coalesce = 1;

	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
		ec->use_adaptive_tx_coalesce = 1;

	ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
	ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;

	return 0;
}

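/**
 * i40e_set_coalesce - set the requested interrupt coalescing settings
 * @netdev: the netdev in question
 * @ec: ethtool coalesce settings from the user
 *
 * Validates the requested rx/tx-usecs values, updates the VSI's ITR
 * settings (including the adaptive flags) and writes the new values to
 * the hardware for each of the VSI's interrupt vectors.
 **/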
static int i40e_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_q_vector *q_vector;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i;

	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
		vsi->work_limit = ec->tx_max_coalesced_frames_irq;

	vector = vsi->base_vector;
	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
	} else if (ec->rx_coalesce_usecs == 0) {
		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
		if (ec->use_adaptive_rx_coalesce)
			netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
	} else {
		netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
		return -EINVAL;
	}

	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
	} else if (ec->tx_coalesce_usecs == 0) {
		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
		if (ec->use_adaptive_tx_coalesce)
			netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
	} else {
		netif_info(pf, drv, netdev,
			   "Invalid value, tx-usecs range is 0-8160\n");
		return -EINVAL;
	}

	if (ec->use_adaptive_rx_coalesce)
		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;

	if (ec->use_adaptive_tx_coalesce)
		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;

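	/* program the updated Rx and Tx ITR values into each of the
	 * VSI's interrupt vectors so they take effect right away
	 */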
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
		i40e_flush(hw);
	}

	return 0;
}

/**
 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
 * @pf: pointer to the physical function struct
 * @cmd: ethtool rxnfc command
 *
 * Returns Success if the flow is supported, else Invalid Input.
 **/
static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on i40e */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	/* fall through to add IP fields */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	/* fall through to add IP fields */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
 * @pf: Pointer to the physical function struct
 * @cmd: The command to get or set Rx flow classification rules
 * @rule_locs: Array of used rule locations
 *
 * This function populates both the total and actual rule count of
 * the ethtool flow classification command
 *
 * Returns 0 on success or -EMSGSIZE if there are more rules than will
 * fit in the supplied rule_locs array
 **/
static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
				     struct ethtool_rxnfc *cmd,
				     u32 *rule_locs)
{
	struct i40e_fdir_filter *rule;
	struct hlist_node *node2;
	int cnt = 0;

	/* report total rule count */
	cmd->data = i40e_get_fd_cnt_all(pf);

	hlist_for_each_entry_safe(rule, node2,
				  &pf->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;

		rule_locs[cnt] = rule->fd_id;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

/**
 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
 * @pf: Pointer to the physical function struct
 * @cmd: The command to get or set Rx flow classification rules
 *
 * This function looks up a filter based on the Rx flow classification
 * command and fills the flow spec info for it if found
 *
 * Returns 0 on success or -EINVAL if filter not found
 **/
static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
				       struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
			(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct i40e_fdir_filter *rule = NULL;
	struct hlist_node *node2;

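	/* the filter list is kept sorted by fd_id, so stop at the first
	 * rule whose id is not below the requested location
	 */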
	hlist_for_each_entry_safe(rule, node2,
				  &pf->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->fd_id)
			break;
	}

	if (!rule || fsp->location != rule->fd_id)
		return -EINVAL;

	fsp->flow_type = rule->flow_type;
	if (fsp->flow_type == IP_USER_FLOW) {
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
	}

	/* Reverse the src and dest notion, since the HW views them from
	 * the Tx perspective, whereas the user expects it from the Rx
	 * filter view.
	 */
	fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];

	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->q_index;

	return 0;
}

/**
 * i40e_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to fill with the locations of the active rules
 *
 * Returns Success if the command is supported.
 **/
static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vsi->alloc_queue_pairs;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = i40e_get_rss_hash_opts(pf, cmd);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = pf->fdir_pf_active_filters;
		/* report total rule count */
		cmd->data = i40e_get_fd_cnt_all(pf);
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = i40e_get_ethtool_fdir_entry(pf, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @pf: pointer to the physical function struct
 * @nfc: ethtool rxnfc command
 *
 * Returns Success if the flow input set is supported.
 **/
static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
{
	struct i40e_hw *hw = &pf->hw;
	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case TCP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
		break;
	case IPV4_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
		break;
	case IPV6_FLOW:
		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
		break;
	default:
		return -EINVAL;
	}

	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_match_fdir_input_set - Match a new filter against an existing one
 * @rule: The filter already added
 * @input: The new filter to compare against
 *
 * Returns true if the two input sets match
 **/
static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
				      struct i40e_fdir_filter *input)
{
	if ((rule->dst_ip[0] != input->dst_ip[0]) ||
	    (rule->src_ip[0] != input->src_ip[0]) ||
	    (rule->dst_port != input->dst_port) ||
	    (rule->src_port != input->src_port))
		return false;
	return true;
}

/**
 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
 * @vsi: Pointer to the targeted VSI
 * @input: The filter to update or NULL to indicate deletion
 * @sw_idx: Software index to the filter
 * @cmd: The command to get or set Rx flow classification rules
 *
 * This function updates (or deletes) a Flow Director entry from
 * the hlist of the corresponding PF
 *
 * Returns 0 on success
 **/
static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
					  struct i40e_fdir_filter *input,
					  u16 sw_idx,
					  struct ethtool_rxnfc *cmd)
{
	struct i40e_fdir_filter *rule, *parent;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node2;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &pf->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->fd_id >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->fd_id == sw_idx)) {
		if (input && !i40e_match_fdir_input_set(rule, input))
			err = i40e_add_del_fdir(vsi, rule, false);
		else if (!input)
			err = i40e_add_del_fdir(vsi, rule, false);
		hlist_del(&rule->fdir_node);
		kfree(rule);
		pf->fdir_pf_active_filters--;
	}

	/* If there is no input this was a delete; err should be 0 if a rule
	 * was successfully found and removed from the list, else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &pf->fdir_filter_list);

	/* update counts */
	pf->fdir_pf_active_filters++;

	return 0;
}

/**
 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
 * @vsi: Pointer to the targeted VSI
 * @cmd: The command to get or set Rx flow classification rules
 *
 * The function removes a Flow Director filter entry from the
 * hlist of the corresponding PF
 *
 * Returns 0 on success
 **/
static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
			       struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct i40e_pf *pf = vsi->back;
	int ret = 0;

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
		return -EBUSY;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return -EBUSY;

	ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);

	i40e_fdir_check_and_reenable(pf);
	return ret;
}

/**
 * i40e_add_fdir_ethtool - Add a Flow Director filter
 * @vsi: pointer to the targeted VSI
 * @cmd: command to get or set RX flow classification rules
 *
 * Add a Flow Director filter for a specific flow spec based on its
 * protocol.  Returns 0 if the filter was successfully added.
 **/
static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct i40e_fdir_filter *input;
	struct i40e_pf *pf;
	int ret = -EINVAL;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return -EOPNOTSUPP;

	if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
		return -ENOSPC;

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
		return -EBUSY;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return -EBUSY;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
			      pf->hw.func_caps.fd_filters_guaranteed)) {
		return -EINVAL;
	}

	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= vsi->num_queue_pairs))
		return -EINVAL;

	input = kzalloc(sizeof(*input), GFP_KERNEL);

	if (!input)
		return -ENOMEM;

	input->fd_id = fsp->location;

	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else
		input->dest_ctl =
			     I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;

	input->q_index = fsp->ring_cookie;
	input->flex_off = 0;
	input->pctype = 0;
	input->dest_vsi = vsi->id;
	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
	input->cnt_index = pf->fd_sb_cnt_idx;
	input->flow_type = fsp->flow_type;
	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;

	/* Reverse the src and dest notion, since the HW expects them from
	 * the Tx perspective, whereas the user's input is from the Rx
	 * filter view.
	 */
	input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
	input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;

	ret = i40e_add_del_fdir(vsi, input, true);
	if (ret)
		kfree(input);
	else
		i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);

	return ret;
}


/**
 * i40e_set_rxnfc - command to set RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 *
 * Returns Success if the command is supported.
 **/
static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = i40e_set_rss_hash_opt(pf, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		ret = i40e_add_fdir_ethtool(vsi, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = i40e_del_fdir_entry(vsi, cmd);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * i40e_max_channels - get Max number of combined channels supported
 * @vsi: vsi pointer
 **/
static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
{
	/* TODO: This code assumes DCB and FD is disabled for now. */
	return vsi->alloc_queue_pairs;
}

/**
 * i40e_get_channels - Get the current and maximum supported channels
 * @dev: network interface device structure
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void i40e_get_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	/* report maximum channels */
	ch->max_combined = i40e_max_channels(vsi);

	/* report info for other vector */
	ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
	ch->max_other = ch->other_count;

	/* Note: This code assumes DCB is disabled for now. */
	ch->combined_count = vsi->num_queue_pairs;
}

/**
 * i40e_set_channels - Set the new channels count.
 * @dev: network interface device structure
 * @ch: ethtool channels structure
 *
 * The new channels count may not be the same as requested by the user
 * since it gets rounded down to a power of 2 value.
 **/
static int i40e_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int new_count;

	/* We do not support setting channels for any other VSI at present */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > i40e_max_channels(vsi))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	/* TODO: Flow director limit, DCB etc */

	/* cap RSS limit */
	if (count > pf->rss_size_max)
		count = pf->rss_size_max;

	/* use rss_reconfig to rebuild with new queue count and update traffic
	 * class queue mapping
	 */
	new_count = i40e_reconfig_rss_queues(pf, count);
	if (new_count > 0)
		return 0;
	else
		return -EINVAL;
}

static const struct ethtool_ops i40e_ethtool_ops = {
	.get_settings		= i40e_get_settings,
	.set_settings		= i40e_set_settings,
	.get_drvinfo		= i40e_get_drvinfo,
	.get_regs_len		= i40e_get_regs_len,
	.get_regs		= i40e_get_regs,
	.nway_reset		= i40e_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= i40e_get_wol,
	.set_wol		= i40e_set_wol,
	.set_eeprom		= i40e_set_eeprom,
	.get_eeprom_len		= i40e_get_eeprom_len,
	.get_eeprom		= i40e_get_eeprom,
	.get_ringparam		= i40e_get_ringparam,
	.set_ringparam		= i40e_set_ringparam,
	.get_pauseparam		= i40e_get_pauseparam,
	.set_pauseparam		= i40e_set_pauseparam,
	.get_msglevel		= i40e_get_msglevel,
	.set_msglevel		= i40e_set_msglevel,
	.get_rxnfc		= i40e_get_rxnfc,
	.set_rxnfc		= i40e_set_rxnfc,
	.self_test		= i40e_diag_test,
	.get_strings		= i40e_get_strings,
	.set_phys_id		= i40e_set_phys_id,
	.get_sset_count		= i40e_get_sset_count,
	.get_ethtool_stats	= i40e_get_ethtool_stats,
	.get_coalesce		= i40e_get_coalesce,
	.set_coalesce		= i40e_set_coalesce,
	.get_channels		= i40e_get_channels,
	.set_channels		= i40e_set_channels,
	.get_ts_info		= i40e_get_ts_info,
};

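/**
 * i40e_set_ethtool_ops - attach the i40e ethtool operations to a netdev
 * @netdev: network interface device structure
 **/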
void i40e_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &i40e_ethtool_ops;
}