1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2014 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 /* ethtool support for i40e */
28 
29 #include "i40e.h"
30 #include "i40e_diag.h"
31 
/* Describes one ethtool statistic: the name reported to user space plus
 * the size and byte offset of the backing counter inside its owning
 * structure (struct net_device_stats, struct i40e_pf or struct i40e_vsi).
 */
struct i40e_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	int sizeof_stat;	/* sizeof() the counter field (u32 or u64) */
	int stat_offset;	/* offsetof() the field in its parent struct */
};
37 
/* Build an i40e_stats table entry for field _stat of structure type _type,
 * reported to ethtool under the string _name.
 */
#define I40E_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}
/* netdev stats reuse the field name itself (stringized) as the label */
#define I40E_NETDEV_STAT(_net_stat) \
		I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
/* per-physical-function (port-wide) statistic */
#define I40E_PF_STAT(_name, _stat) \
		I40E_STAT(struct i40e_pf, _name, _stat)
/* per-VSI statistic */
#define I40E_VSI_STAT(_name, _stat) \
		I40E_STAT(struct i40e_vsi, _name, _stat)
49 
/* Per-netdev statistics pulled from the rtnl_link_stats64 of each VSI.
 * NOTE: entry order here defines the order of both the strings emitted by
 * i40e_get_strings() and the values emitted by i40e_get_ethtool_stats().
 */
static const struct i40e_stats i40e_gstrings_net_stats[] = {
	I40E_NETDEV_STAT(rx_packets),
	I40E_NETDEV_STAT(tx_packets),
	I40E_NETDEV_STAT(rx_bytes),
	I40E_NETDEV_STAT(tx_bytes),
	I40E_NETDEV_STAT(rx_errors),
	I40E_NETDEV_STAT(tx_errors),
	I40E_NETDEV_STAT(rx_dropped),
	I40E_NETDEV_STAT(tx_dropped),
	I40E_NETDEV_STAT(multicast),
	I40E_NETDEV_STAT(collisions),
	I40E_NETDEV_STAT(rx_length_errors),
	I40E_NETDEV_STAT(rx_crc_errors),
};
64 
65 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
66  * but they are separate.  This device supports Virtualization, and
67  * as such might have several netdevs supporting VMDq and FCoE going
68  * through a single port.  The NETDEV_STATs are for individual netdevs
69  * seen at the top of the stack, and the PF_STATs are for the physical
70  * function at the bottom of the stack hosting those netdevs.
71  *
72  * The PF_STATs are appended to the netdev stats only when ethtool -S
73  * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
74  */
/* Port-wide PF statistics, offsets relative to struct i40e_pf.
 * NOTE: entry order must match between i40e_get_strings() and
 * i40e_get_ethtool_stats(), which both iterate this table in order.
 */
static struct i40e_stats i40e_gstrings_stats[] = {
	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	I40E_PF_STAT("crc_errors", stats.crc_errors),
	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
	I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
114 
/* Four u64 counters per queue pair: (num Tx rings + num Rx rings),
 * each ring contributing 2 counters (packets and bytes).
 */
#define I40E_QUEUE_STATS_LEN(n) \
  ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
    ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
#define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
/* per-VSI stats: the netdev counters plus the per-queue counters */
#define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
				 I40E_QUEUE_STATS_LEN((n)))
/* number of u64 priority flow control counters: five per-priority arrays,
 * converted from total byte size to a u64 element count
 */
#define I40E_PFC_STATS_LEN ( \
		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
		 / sizeof(u64))
/* base-PF netdev reports port stats and PFC stats on top of VSI stats */
#define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
				 I40E_PFC_STATS_LEN + \
				 I40E_VSI_STATS_LEN((n)))
132 
/* Indices into the ethtool self-test result array; must stay in sync
 * with the row ordering of i40e_gstrings_test[] below.
 */
enum i40e_ethtool_test_id {
	I40E_ETH_TEST_REG = 0,
	I40E_ETH_TEST_EEPROM,
	I40E_ETH_TEST_INTR,
	I40E_ETH_TEST_LOOPBACK,
	I40E_ETH_TEST_LINK,
};
140 
/* Self-test names reported to ethtool; order matches i40e_ethtool_test_id */
static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)",
	"Eeprom test    (offline)",
	"Interrupt test (offline)",
	"Loopback test  (offline)",
	"Link test   (on/offline)"
};

/* number of self-test entries */
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
150 
151 /**
152  * i40e_get_settings - Get Link Speed and Duplex settings
153  * @netdev: network interface device structure
154  * @ecmd: ethtool command
155  *
156  * Reports speed/duplex settings based on media_type
157  **/
158 static int i40e_get_settings(struct net_device *netdev,
159 			     struct ethtool_cmd *ecmd)
160 {
161 	struct i40e_netdev_priv *np = netdev_priv(netdev);
162 	struct i40e_pf *pf = np->vsi->back;
163 	struct i40e_hw *hw = &pf->hw;
164 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
165 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
166 	u32 link_speed = hw_link_info->link_speed;
167 
168 	/* hardware is either in 40G mode or 10G mode
169 	 * NOTE: this section initializes supported and advertising
170 	 */
171 	switch (hw_link_info->phy_type) {
172 	case I40E_PHY_TYPE_40GBASE_CR4:
173 	case I40E_PHY_TYPE_40GBASE_CR4_CU:
174 		ecmd->supported = SUPPORTED_40000baseCR4_Full;
175 		ecmd->advertising = ADVERTISED_40000baseCR4_Full;
176 		break;
177 	case I40E_PHY_TYPE_40GBASE_KR4:
178 		ecmd->supported = SUPPORTED_40000baseKR4_Full;
179 		ecmd->advertising = ADVERTISED_40000baseKR4_Full;
180 		break;
181 	case I40E_PHY_TYPE_40GBASE_SR4:
182 		ecmd->supported = SUPPORTED_40000baseSR4_Full;
183 		ecmd->advertising = ADVERTISED_40000baseSR4_Full;
184 		break;
185 	case I40E_PHY_TYPE_40GBASE_LR4:
186 		ecmd->supported = SUPPORTED_40000baseLR4_Full;
187 		ecmd->advertising = ADVERTISED_40000baseLR4_Full;
188 		break;
189 	case I40E_PHY_TYPE_10GBASE_KX4:
190 		ecmd->supported = SUPPORTED_10000baseKX4_Full;
191 		ecmd->advertising = ADVERTISED_10000baseKX4_Full;
192 		break;
193 	case I40E_PHY_TYPE_10GBASE_KR:
194 		ecmd->supported = SUPPORTED_10000baseKR_Full;
195 		ecmd->advertising = ADVERTISED_10000baseKR_Full;
196 		break;
197 	default:
198 		if (i40e_is_40G_device(hw->device_id)) {
199 			ecmd->supported = SUPPORTED_40000baseSR4_Full;
200 			ecmd->advertising = ADVERTISED_40000baseSR4_Full;
201 		} else {
202 			ecmd->supported = SUPPORTED_10000baseT_Full;
203 			ecmd->advertising = ADVERTISED_10000baseT_Full;
204 		}
205 		break;
206 	}
207 
208 	ecmd->supported |= SUPPORTED_Autoneg;
209 	ecmd->advertising |= ADVERTISED_Autoneg;
210 	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
211 			  AUTONEG_ENABLE : AUTONEG_DISABLE);
212 
213 	switch (hw->phy.media_type) {
214 	case I40E_MEDIA_TYPE_BACKPLANE:
215 		ecmd->supported |= SUPPORTED_Backplane;
216 		ecmd->advertising |= ADVERTISED_Backplane;
217 		ecmd->port = PORT_NONE;
218 		break;
219 	case I40E_MEDIA_TYPE_BASET:
220 		ecmd->supported |= SUPPORTED_TP;
221 		ecmd->advertising |= ADVERTISED_TP;
222 		ecmd->port = PORT_TP;
223 		break;
224 	case I40E_MEDIA_TYPE_DA:
225 	case I40E_MEDIA_TYPE_CX4:
226 		ecmd->supported |= SUPPORTED_FIBRE;
227 		ecmd->advertising |= ADVERTISED_FIBRE;
228 		ecmd->port = PORT_DA;
229 		break;
230 	case I40E_MEDIA_TYPE_FIBER:
231 		ecmd->supported |= SUPPORTED_FIBRE;
232 		ecmd->advertising |= ADVERTISED_FIBRE;
233 		ecmd->port = PORT_FIBRE;
234 		break;
235 	case I40E_MEDIA_TYPE_UNKNOWN:
236 	default:
237 		ecmd->port = PORT_OTHER;
238 		break;
239 	}
240 
241 	ecmd->transceiver = XCVR_EXTERNAL;
242 
243 	if (link_up) {
244 		switch (link_speed) {
245 		case I40E_LINK_SPEED_40GB:
246 			/* need a SPEED_40000 in ethtool.h */
247 			ethtool_cmd_speed_set(ecmd, 40000);
248 			break;
249 		case I40E_LINK_SPEED_10GB:
250 			ethtool_cmd_speed_set(ecmd, SPEED_10000);
251 			break;
252 		default:
253 			break;
254 		}
255 		ecmd->duplex = DUPLEX_FULL;
256 	} else {
257 		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
258 		ecmd->duplex = DUPLEX_UNKNOWN;
259 	}
260 
261 	return 0;
262 }
263 
264 /**
265  * i40e_get_pauseparam -  Get Flow Control status
266  * Return tx/rx-pause status
267  **/
268 static void i40e_get_pauseparam(struct net_device *netdev,
269 				struct ethtool_pauseparam *pause)
270 {
271 	struct i40e_netdev_priv *np = netdev_priv(netdev);
272 	struct i40e_pf *pf = np->vsi->back;
273 	struct i40e_hw *hw = &pf->hw;
274 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
275 
276 	pause->autoneg =
277 		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
278 		  AUTONEG_ENABLE : AUTONEG_DISABLE);
279 
280 	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
281 		pause->rx_pause = 1;
282 	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
283 		pause->tx_pause = 1;
284 	} else if (hw->fc.current_mode == I40E_FC_FULL) {
285 		pause->rx_pause = 1;
286 		pause->tx_pause = 1;
287 	}
288 }
289 
290 static u32 i40e_get_msglevel(struct net_device *netdev)
291 {
292 	struct i40e_netdev_priv *np = netdev_priv(netdev);
293 	struct i40e_pf *pf = np->vsi->back;
294 
295 	return pf->msg_enable;
296 }
297 
298 static void i40e_set_msglevel(struct net_device *netdev, u32 data)
299 {
300 	struct i40e_netdev_priv *np = netdev_priv(netdev);
301 	struct i40e_pf *pf = np->vsi->back;
302 
303 	if (I40E_DEBUG_USER & data)
304 		pf->hw.debug_mask = data;
305 	pf->msg_enable = data;
306 }
307 
308 static int i40e_get_regs_len(struct net_device *netdev)
309 {
310 	int reg_count = 0;
311 	int i;
312 
313 	for (i = 0; i40e_reg_list[i].offset != 0; i++)
314 		reg_count += i40e_reg_list[i].elements;
315 
316 	return reg_count * sizeof(u32);
317 }
318 
319 static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
320 			  void *p)
321 {
322 	struct i40e_netdev_priv *np = netdev_priv(netdev);
323 	struct i40e_pf *pf = np->vsi->back;
324 	struct i40e_hw *hw = &pf->hw;
325 	u32 *reg_buf = p;
326 	int i, j, ri;
327 	u32 reg;
328 
329 	/* Tell ethtool which driver-version-specific regs output we have.
330 	 *
331 	 * At some point, if we have ethtool doing special formatting of
332 	 * this data, it will rely on this version number to know how to
333 	 * interpret things.  Hence, this needs to be updated if/when the
334 	 * diags register table is changed.
335 	 */
336 	regs->version = 1;
337 
338 	/* loop through the diags reg table for what to print */
339 	ri = 0;
340 	for (i = 0; i40e_reg_list[i].offset != 0; i++) {
341 		for (j = 0; j < i40e_reg_list[i].elements; j++) {
342 			reg = i40e_reg_list[i].offset
343 				+ (j * i40e_reg_list[i].stride);
344 			reg_buf[ri++] = rd32(hw, reg);
345 		}
346 	}
347 
348 }
349 
/**
 * i40e_get_eeprom - read NVM contents for ethtool -e
 * @netdev: network interface device structure
 * @eeprom: ethtool eeprom request; @offset/@len select the range, @magic
 *          is filled with the device/vendor ID
 * @bytes: caller buffer to receive @eeprom->len bytes
 *
 * Reads the NVM in up-to-4KB admin-queue chunks while holding the NVM
 * read resource.  Returns 0 on success or the error from the NVM
 * acquire/read calls.
 **/
static int i40e_get_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_pf *pf = np->vsi->back;
	int ret_val = 0, len;
	u8 *eeprom_buff;
	u16 i, sectors;
	bool last;
/* maximum bytes transferred per AQ NVM read */
#define I40E_NVM_SECTOR_SIZE  4096
	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	/* take the NVM resource for the duration of the chunked read */
	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_val) {
		dev_info(&pf->pdev->dev,
			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
			 ret_val, hw->aq.asq_last_status);
		goto free_buff;
	}

	/* round the sector count up to cover a partial final sector */
	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
	len = I40E_NVM_SECTOR_SIZE;
	last = false;
	for (i = 0; i < sectors; i++) {
		/* the final sector may be short and must set the "last"
		 * flag on the AQ command
		 */
		if (i == (sectors - 1)) {
			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
			last = true;
		}
		ret_val = i40e_aq_read_nvm(hw, 0x0,
				eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
				len,
				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
				last, NULL);
		if (ret_val) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed err=%d status=0x%x\n",
				 ret_val, hw->aq.asq_last_status);
			goto release_nvm;
		}
	}

release_nvm:
	i40e_release_nvm(hw);
	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
free_buff:
	kfree(eeprom_buff);
	return ret_val;
}
407 
408 static int i40e_get_eeprom_len(struct net_device *netdev)
409 {
410 	struct i40e_netdev_priv *np = netdev_priv(netdev);
411 	struct i40e_hw *hw = &np->vsi->back->hw;
412 	u32 val;
413 
414 	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
415 		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
416 		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
417 	/* register returns value in power of 2, 64Kbyte chunks. */
418 	val = (64 * 1024) * (1 << val);
419 	return val;
420 }
421 
422 static void i40e_get_drvinfo(struct net_device *netdev,
423 			     struct ethtool_drvinfo *drvinfo)
424 {
425 	struct i40e_netdev_priv *np = netdev_priv(netdev);
426 	struct i40e_vsi *vsi = np->vsi;
427 	struct i40e_pf *pf = vsi->back;
428 
429 	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
430 	strlcpy(drvinfo->version, i40e_driver_version_str,
431 		sizeof(drvinfo->version));
432 	strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
433 		sizeof(drvinfo->fw_version));
434 	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
435 		sizeof(drvinfo->bus_info));
436 }
437 
438 static void i40e_get_ringparam(struct net_device *netdev,
439 			       struct ethtool_ringparam *ring)
440 {
441 	struct i40e_netdev_priv *np = netdev_priv(netdev);
442 	struct i40e_pf *pf = np->vsi->back;
443 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
444 
445 	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
446 	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
447 	ring->rx_mini_max_pending = 0;
448 	ring->rx_jumbo_max_pending = 0;
449 	ring->rx_pending = vsi->rx_rings[0]->count;
450 	ring->tx_pending = vsi->tx_rings[0]->count;
451 	ring->rx_mini_pending = 0;
452 	ring->rx_jumbo_pending = 0;
453 }
454 
/**
 * i40e_set_ringparam - change the Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: requested ring parameters from ethtool
 *
 * Validates and rounds the requested counts, then (if the interface is
 * running) builds fully-allocated clone rings at the new size, brings the
 * interface down, swaps the clones in, and brings it back up.  Cloning
 * first means a failed allocation leaves the running rings untouched.
 *
 * Returns 0 on success, -EINVAL on bad parameters, -ENOMEM on
 * allocation failure.
 **/
static int i40e_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	/* mini/jumbo rings are not supported */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
		netdev_info(netdev,
			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
			    ring->tx_pending, ring->rx_pending,
			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
		return -EINVAL;
	}

	/* round up to the hardware's required descriptor multiple */
	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == vsi->tx_rings[0]->count) &&
	    (new_rx_count == vsi->rx_rings[0]->count))
		return 0;

	/* serialize against other reconfiguration paths */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);

	if (!netif_running(vsi->netdev)) {
		/* simple case - set for the next time the netdev is started */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			vsi->tx_rings[i]->count = new_tx_count;
			vsi->rx_rings[i]->count = new_rx_count;
		}
		goto done;
	}

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */

	/* alloc updated Tx resources */
	if (new_tx_count != vsi->tx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Tx descriptor count from %d to %d.\n",
			    vsi->tx_rings[0]->count, new_tx_count);
		tx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!tx_rings) {
			err = -ENOMEM;
			goto done;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			tx_rings[i] = *vsi->tx_rings[i];
			tx_rings[i].count = new_tx_count;
			err = i40e_setup_tx_descriptors(&tx_rings[i]);
			if (err) {
				/* unwind the rings already set up */
				while (i) {
					i--;
					i40e_free_tx_resources(&tx_rings[i]);
				}
				kfree(tx_rings);
				tx_rings = NULL;

				goto done;
			}
		}
	}

	/* alloc updated Rx resources */
	if (new_rx_count != vsi->rx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Rx descriptor count from %d to %d\n",
			    vsi->rx_rings[0]->count, new_rx_count);
		rx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!rx_rings) {
			err = -ENOMEM;
			goto free_tx;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			rx_rings[i] = *vsi->rx_rings[i];
			rx_rings[i].count = new_rx_count;
			err = i40e_setup_rx_descriptors(&rx_rings[i]);
			if (err) {
				/* unwind the rings already set up */
				while (i) {
					i--;
					i40e_free_rx_resources(&rx_rings[i]);
				}
				kfree(rx_rings);
				rx_rings = NULL;

				goto free_tx;
			}
		}
	}

	/* Bring interface down, copy in the new ring info,
	 * then restore the interface
	 */
	i40e_down(vsi);

	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_tx_resources(vsi->tx_rings[i]);
			*vsi->tx_rings[i] = tx_rings[i];
		}
		kfree(tx_rings);
		tx_rings = NULL;
	}

	if (rx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_rx_resources(vsi->rx_rings[i]);
			*vsi->rx_rings[i] = rx_rings[i];
		}
		kfree(rx_rings);
		rx_rings = NULL;
	}

	i40e_up(vsi);

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			i40e_free_tx_resources(&tx_rings[i]);
		kfree(tx_rings);
		tx_rings = NULL;
	}

done:
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);

	return err;
}
603 
604 static int i40e_get_sset_count(struct net_device *netdev, int sset)
605 {
606 	struct i40e_netdev_priv *np = netdev_priv(netdev);
607 	struct i40e_vsi *vsi = np->vsi;
608 	struct i40e_pf *pf = vsi->back;
609 
610 	switch (sset) {
611 	case ETH_SS_TEST:
612 		return I40E_TEST_LEN;
613 	case ETH_SS_STATS:
614 		if (vsi == pf->vsi[pf->lan_vsi])
615 			return I40E_PF_STATS_LEN(netdev);
616 		else
617 			return I40E_VSI_STATS_LEN(netdev);
618 	default:
619 		return -EOPNOTSUPP;
620 	}
621 }
622 
/**
 * i40e_get_ethtool_stats - collect stats for ethtool -S
 * @netdev: network interface device structure
 * @stats: ethtool stats command structure (unused here)
 * @data: output array; order must match i40e_get_strings() exactly
 *
 * Emits, in order: the netdev counters, 4 counters per queue pair
 * (tx packets/bytes then rx packets/bytes), and - on the base PF netdev
 * only - the port-wide counters followed by the PFC counters.
 **/
static void i40e_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i = 0;
	char *p;
	int j;
	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
	unsigned int start;

	i40e_update_stats(vsi);

	/* netdev counters, read via the table's size/offset descriptors */
	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	/* per-queue counters; rings are RCU-protected and read under the
	 * u64_stats seqcount for a consistent packet/byte snapshot
	 */
	rcu_read_lock();
	for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
		struct i40e_ring *rx_ring;

		/* NOTE: i still advances by 4 for a missing ring, leaving
		 * that queue pair's slots untouched
		 */
		if (!tx_ring)
			continue;

		/* process Tx ring statistics */
		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			data[i] = tx_ring->stats.packets;
			data[i + 1] = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		/* Rx ring is the 2nd half of the queue pair */
		rx_ring = &tx_ring[1];
		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			data[i + 2] = rx_ring->stats.packets;
			data[i + 3] = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
	}
	rcu_read_unlock();
	/* port-wide and PFC counters only on the base PF netdev */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
			data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
				   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
			data[i++] = pf->stats.priority_xon_tx[j];
			data[i++] = pf->stats.priority_xoff_tx[j];
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
			data[i++] = pf->stats.priority_xon_rx[j];
			data[i++] = pf->stats.priority_xoff_rx[j];
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
			data[i++] = pf->stats.priority_xon_2_xoff[j];
	}
}
684 
/**
 * i40e_get_strings - return the string set for the given stringset id
 * @netdev: network interface device structure
 * @stringset: ETH_SS_TEST or ETH_SS_STATS
 * @data: output buffer, ETH_GSTRING_LEN bytes per string
 *
 * For ETH_SS_STATS the emission order here must exactly mirror the value
 * order produced by i40e_get_ethtool_stats().
 **/
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
			     u8 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < I40E_TEST_LEN; i++) {
			memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		/* netdev stat names */
		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 i40e_gstrings_net_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
		/* four names per queue pair: tx then rx, packets then bytes */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		/* port-wide and PFC names only on the base PF netdev */
		if (vsi == pf->vsi[pf->lan_vsi]) {
			for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
				snprintf(p, ETH_GSTRING_LEN, "port.%s",
					 i40e_gstrings_stats[i].stat_string);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.tx_priority_%u_xon", i);
				p += ETH_GSTRING_LEN;
				snprintf(p, ETH_GSTRING_LEN,
					 "port.tx_priority_%u_xoff", i);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xon", i);
				p += ETH_GSTRING_LEN;
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xoff", i);
				p += ETH_GSTRING_LEN;
			}
			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "port.rx_priority_%u_xon_2_xoff", i);
				p += ETH_GSTRING_LEN;
			}
		}
		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
749 
750 static int i40e_get_ts_info(struct net_device *dev,
751 			    struct ethtool_ts_info *info)
752 {
753 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
754 
755 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
756 				SOF_TIMESTAMPING_RX_SOFTWARE |
757 				SOF_TIMESTAMPING_SOFTWARE |
758 				SOF_TIMESTAMPING_TX_HARDWARE |
759 				SOF_TIMESTAMPING_RX_HARDWARE |
760 				SOF_TIMESTAMPING_RAW_HARDWARE;
761 
762 	if (pf->ptp_clock)
763 		info->phc_index = ptp_clock_index(pf->ptp_clock);
764 	else
765 		info->phc_index = -1;
766 
767 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
768 
769 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
770 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
771 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
772 			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
773 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
774 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
775 			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
776 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
777 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
778 			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
779 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
780 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
781 
782 	return 0;
783 }
784 
785 static int i40e_link_test(struct net_device *netdev, u64 *data)
786 {
787 	struct i40e_netdev_priv *np = netdev_priv(netdev);
788 	struct i40e_pf *pf = np->vsi->back;
789 
790 	netif_info(pf, hw, netdev, "link test\n");
791 	if (i40e_get_link_status(&pf->hw))
792 		*data = 0;
793 	else
794 		*data = 1;
795 
796 	return *data;
797 }
798 
799 static int i40e_reg_test(struct net_device *netdev, u64 *data)
800 {
801 	struct i40e_netdev_priv *np = netdev_priv(netdev);
802 	struct i40e_pf *pf = np->vsi->back;
803 
804 	netif_info(pf, hw, netdev, "register test\n");
805 	*data = i40e_diag_reg_test(&pf->hw);
806 
807 	return *data;
808 }
809 
810 static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
811 {
812 	struct i40e_netdev_priv *np = netdev_priv(netdev);
813 	struct i40e_pf *pf = np->vsi->back;
814 
815 	netif_info(pf, hw, netdev, "eeprom test\n");
816 	*data = i40e_diag_eeprom_test(&pf->hw);
817 
818 	return *data;
819 }
820 
821 static int i40e_intr_test(struct net_device *netdev, u64 *data)
822 {
823 	struct i40e_netdev_priv *np = netdev_priv(netdev);
824 	struct i40e_pf *pf = np->vsi->back;
825 	u16 swc_old = pf->sw_int_count;
826 
827 	netif_info(pf, hw, netdev, "interrupt test\n");
828 	wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
829 	     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
830 	      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
831 	usleep_range(1000, 2000);
832 	*data = (swc_old == pf->sw_int_count);
833 
834 	return *data;
835 }
836 
837 static int i40e_loopback_test(struct net_device *netdev, u64 *data)
838 {
839 	struct i40e_netdev_priv *np = netdev_priv(netdev);
840 	struct i40e_pf *pf = np->vsi->back;
841 
842 	netif_info(pf, hw, netdev, "loopback test not implemented\n");
843 	*data = 0;
844 
845 	return *data;
846 }
847 
/**
 * i40e_diag_test - run the requested ethtool self-tests
 * @netdev: network interface device structure
 * @eth_test: test request flags from ethtool; ETH_TEST_FL_FAILED is set
 *            on any failure
 * @data: per-test result array indexed by enum i40e_ethtool_test_id
 *
 * Offline mode runs link, eeprom, interrupt, loopback and register tests
 * (register test last, since a PF reset is required afterwards).  Online
 * mode runs only the link test and reports the rest as passed.
 **/
static void i40e_diag_test(struct net_device *netdev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */
		netif_info(pf, drv, netdev, "offline testing starting\n");

		set_bit(__I40E_TESTING, &pf->state);

		/* Link test performed before hardware reset
		 * so autoneg doesn't interfere with test result
		 */
		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* run reg test last, a reset is required after it */
		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(__I40E_TESTING, &pf->state);
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
	} else {
		/* Online tests */
		netif_info(pf, drv, netdev, "online testing starting\n");

		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[I40E_ETH_TEST_REG] = 0;
		data[I40E_ETH_TEST_EEPROM] = 0;
		data[I40E_ETH_TEST_INTR] = 0;
		data[I40E_ETH_TEST_LOOPBACK] = 0;
	}

	netif_info(pf, drv, netdev, "testing finished\n");
}
897 
898 static void i40e_get_wol(struct net_device *netdev,
899 			 struct ethtool_wolinfo *wol)
900 {
901 	struct i40e_netdev_priv *np = netdev_priv(netdev);
902 	struct i40e_pf *pf = np->vsi->back;
903 	struct i40e_hw *hw = &pf->hw;
904 	u16 wol_nvm_bits;
905 
906 	/* NVM bit on means WoL disabled for the port */
907 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
908 	if ((1 << hw->port) & wol_nvm_bits) {
909 		wol->supported = 0;
910 		wol->wolopts = 0;
911 	} else {
912 		wol->supported = WAKE_MAGIC;
913 		wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
914 	}
915 }
916 
917 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
918 {
919 	struct i40e_netdev_priv *np = netdev_priv(netdev);
920 	struct i40e_pf *pf = np->vsi->back;
921 	struct i40e_hw *hw = &pf->hw;
922 	u16 wol_nvm_bits;
923 
924 	/* NVM bit on means WoL disabled for the port */
925 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
926 	if (((1 << hw->port) & wol_nvm_bits))
927 		return -EOPNOTSUPP;
928 
929 	/* only magic packet is supported */
930 	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
931 		return -EOPNOTSUPP;
932 
933 	/* is this a new value? */
934 	if (pf->wol_en != !!wol->wolopts) {
935 		pf->wol_en = !!wol->wolopts;
936 		device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
937 	}
938 
939 	return 0;
940 }
941 
942 static int i40e_nway_reset(struct net_device *netdev)
943 {
944 	/* restart autonegotiation */
945 	struct i40e_netdev_priv *np = netdev_priv(netdev);
946 	struct i40e_pf *pf = np->vsi->back;
947 	struct i40e_hw *hw = &pf->hw;
948 	i40e_status ret = 0;
949 
950 	ret = i40e_aq_set_link_restart_an(hw, NULL);
951 	if (ret) {
952 		netdev_info(netdev, "link restart failed, aq_err=%d\n",
953 			    pf->hw.aq.asq_last_status);
954 		return -EIO;
955 	}
956 
957 	return 0;
958 }
959 
960 static int i40e_set_phys_id(struct net_device *netdev,
961 			    enum ethtool_phys_id_state state)
962 {
963 	struct i40e_netdev_priv *np = netdev_priv(netdev);
964 	struct i40e_pf *pf = np->vsi->back;
965 	struct i40e_hw *hw = &pf->hw;
966 	int blink_freq = 2;
967 
968 	switch (state) {
969 	case ETHTOOL_ID_ACTIVE:
970 		pf->led_status = i40e_led_get(hw);
971 		return blink_freq;
972 	case ETHTOOL_ID_ON:
973 		i40e_led_set(hw, 0xF, false);
974 		break;
975 	case ETHTOOL_ID_OFF:
976 		i40e_led_set(hw, 0x0, false);
977 		break;
978 	case ETHTOOL_ID_INACTIVE:
979 		i40e_led_set(hw, pf->led_status, false);
980 		break;
981 	}
982 
983 	return 0;
984 }
985 
/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
 * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us; likewise
 * 125us (8000 interrupts per second) == ITR(62)
 */
990 
991 static int i40e_get_coalesce(struct net_device *netdev,
992 			     struct ethtool_coalesce *ec)
993 {
994 	struct i40e_netdev_priv *np = netdev_priv(netdev);
995 	struct i40e_vsi *vsi = np->vsi;
996 
997 	ec->tx_max_coalesced_frames_irq = vsi->work_limit;
998 	ec->rx_max_coalesced_frames_irq = vsi->work_limit;
999 
1000 	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
1001 		ec->rx_coalesce_usecs = 1;
1002 	else
1003 		ec->rx_coalesce_usecs = vsi->rx_itr_setting;
1004 
1005 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1006 		ec->tx_coalesce_usecs = 1;
1007 	else
1008 		ec->tx_coalesce_usecs = vsi->tx_itr_setting;
1009 
1010 	return 0;
1011 }
1012 
1013 static int i40e_set_coalesce(struct net_device *netdev,
1014 			     struct ethtool_coalesce *ec)
1015 {
1016 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1017 	struct i40e_q_vector *q_vector;
1018 	struct i40e_vsi *vsi = np->vsi;
1019 	struct i40e_pf *pf = vsi->back;
1020 	struct i40e_hw *hw = &pf->hw;
1021 	u16 vector;
1022 	int i;
1023 
1024 	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
1025 		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
1026 
1027 	switch (ec->rx_coalesce_usecs) {
1028 	case 0:
1029 		vsi->rx_itr_setting = 0;
1030 		break;
1031 	case 1:
1032 		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
1033 				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
1034 		break;
1035 	default:
1036 		if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1037 		    (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1038 			return -EINVAL;
1039 		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1040 		break;
1041 	}
1042 
1043 	switch (ec->tx_coalesce_usecs) {
1044 	case 0:
1045 		vsi->tx_itr_setting = 0;
1046 		break;
1047 	case 1:
1048 		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
1049 				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
1050 		break;
1051 	default:
1052 		if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1053 		    (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1054 			return -EINVAL;
1055 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1056 		break;
1057 	}
1058 
1059 	vector = vsi->base_vector;
1060 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
1061 		q_vector = vsi->q_vectors[i];
1062 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
1063 		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
1064 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
1065 		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
1066 		i40e_flush(hw);
1067 	}
1068 
1069 	return 0;
1070 }
1071 
1072 /**
1073  * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
1074  * @pf: pointer to the physical function struct
1075  * @cmd: ethtool rxnfc command
1076  *
1077  * Returns Success if the flow is supported, else Invalid Input.
1078  **/
1079 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
1080 {
1081 	cmd->data = 0;
1082 
1083 	/* Report default options for RSS on i40e */
1084 	switch (cmd->flow_type) {
1085 	case TCP_V4_FLOW:
1086 	case UDP_V4_FLOW:
1087 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1088 	/* fall through to add IP fields */
1089 	case SCTP_V4_FLOW:
1090 	case AH_ESP_V4_FLOW:
1091 	case AH_V4_FLOW:
1092 	case ESP_V4_FLOW:
1093 	case IPV4_FLOW:
1094 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1095 		break;
1096 	case TCP_V6_FLOW:
1097 	case UDP_V6_FLOW:
1098 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1099 	/* fall through to add IP fields */
1100 	case SCTP_V6_FLOW:
1101 	case AH_ESP_V6_FLOW:
1102 	case AH_V6_FLOW:
1103 	case ESP_V6_FLOW:
1104 	case IPV6_FLOW:
1105 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1106 		break;
1107 	default:
1108 		return -EINVAL;
1109 	}
1110 
1111 	return 0;
1112 }
1113 
1114 /**
1115  * i40e_get_rxnfc - command to get RX flow classification rules
1116  * @netdev: network interface device structure
1117  * @cmd: ethtool rxnfc command
1118  *
1119  * Returns Success if the command is supported.
1120  **/
1121 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1122 			  u32 *rule_locs)
1123 {
1124 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1125 	struct i40e_vsi *vsi = np->vsi;
1126 	struct i40e_pf *pf = vsi->back;
1127 	int ret = -EOPNOTSUPP;
1128 
1129 	switch (cmd->cmd) {
1130 	case ETHTOOL_GRXRINGS:
1131 		cmd->data = vsi->alloc_queue_pairs;
1132 		ret = 0;
1133 		break;
1134 	case ETHTOOL_GRXFH:
1135 		ret = i40e_get_rss_hash_opts(pf, cmd);
1136 		break;
1137 	case ETHTOOL_GRXCLSRLCNT:
1138 		cmd->rule_cnt = 10;
1139 		ret = 0;
1140 		break;
1141 	case ETHTOOL_GRXCLSRULE:
1142 		ret = 0;
1143 		break;
1144 	case ETHTOOL_GRXCLSRLALL:
1145 		cmd->data = 500;
1146 		ret = 0;
1147 	default:
1148 		break;
1149 	}
1150 
1151 	return ret;
1152 }
1153 
1154 /**
1155  * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
1156  * @pf: pointer to the physical function struct
1157  * @cmd: ethtool rxnfc command
1158  *
1159  * Returns Success if the flow input set is supported.
1160  **/
1161 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1162 {
1163 	struct i40e_hw *hw = &pf->hw;
1164 	u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
1165 		   ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
1166 
1167 	/* RSS does not support anything other than hashing
1168 	 * to queues on src and dst IPs and ports
1169 	 */
1170 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
1171 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
1172 		return -EINVAL;
1173 
1174 	/* We need at least the IP SRC and DEST fields for hashing */
1175 	if (!(nfc->data & RXH_IP_SRC) ||
1176 	    !(nfc->data & RXH_IP_DST))
1177 		return -EINVAL;
1178 
1179 	switch (nfc->flow_type) {
1180 	case TCP_V4_FLOW:
1181 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1182 		case 0:
1183 			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1184 			break;
1185 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1186 			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1187 			break;
1188 		default:
1189 			return -EINVAL;
1190 		}
1191 		break;
1192 	case TCP_V6_FLOW:
1193 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1194 		case 0:
1195 			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1196 			break;
1197 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1198 			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1199 			break;
1200 		default:
1201 			return -EINVAL;
1202 		}
1203 		break;
1204 	case UDP_V4_FLOW:
1205 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1206 		case 0:
1207 			hena &=
1208 			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1209 			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1210 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1211 			break;
1212 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1213 			hena |=
1214 			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)  |
1215 			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1216 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1217 			break;
1218 		default:
1219 			return -EINVAL;
1220 		}
1221 		break;
1222 	case UDP_V6_FLOW:
1223 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1224 		case 0:
1225 			hena &=
1226 			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1227 			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1228 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1229 			break;
1230 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1231 			hena |=
1232 			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)  |
1233 			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1234 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1235 			break;
1236 		default:
1237 			return -EINVAL;
1238 		}
1239 		break;
1240 	case AH_ESP_V4_FLOW:
1241 	case AH_V4_FLOW:
1242 	case ESP_V4_FLOW:
1243 	case SCTP_V4_FLOW:
1244 		if ((nfc->data & RXH_L4_B_0_1) ||
1245 		    (nfc->data & RXH_L4_B_2_3))
1246 			return -EINVAL;
1247 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1248 		break;
1249 	case AH_ESP_V6_FLOW:
1250 	case AH_V6_FLOW:
1251 	case ESP_V6_FLOW:
1252 	case SCTP_V6_FLOW:
1253 		if ((nfc->data & RXH_L4_B_0_1) ||
1254 		    (nfc->data & RXH_L4_B_2_3))
1255 			return -EINVAL;
1256 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1257 		break;
1258 	case IPV4_FLOW:
1259 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
1260 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
1261 		break;
1262 	case IPV6_FLOW:
1263 		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
1264 			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1265 		break;
1266 	default:
1267 		return -EINVAL;
1268 	}
1269 
1270 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
1271 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1272 	i40e_flush(hw);
1273 
1274 	return 0;
1275 }
1276 
1277 #define IP_HEADER_OFFSET 14
1278 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
1279 /**
1280  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
1281  * a specific flow spec
1282  * @vsi: pointer to the targeted VSI
1283  * @fd_data: the flow director data required from the FDir descriptor
1284  * @ethtool_rx_flow_spec: the flow spec
1285  * @add: true adds a filter, false removes it
1286  *
1287  * Returns 0 if the filters were successfully added or removed
1288  **/
1289 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
1290 				   struct i40e_fdir_data *fd_data,
1291 				   struct ethtool_rx_flow_spec *fsp, bool add)
1292 {
1293 	struct i40e_pf *pf = vsi->back;
1294 	struct udphdr *udp;
1295 	struct iphdr *ip;
1296 	bool err = false;
1297 	int ret;
1298 	int i;
1299 	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1300 			 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
1301 			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1302 			 0, 0, 0, 0, 0, 0, 0, 0};
1303 
1304 	memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
1305 
1306 	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1307 	udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1308 	      + sizeof(struct iphdr));
1309 
1310 	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1311 	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1312 	udp->source = fsp->h_u.tcp_ip4_spec.psrc;
1313 	udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1314 
1315 	for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
1316 	     i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
1317 		fd_data->pctype = i;
1318 		ret = i40e_program_fdir_filter(fd_data, pf, add);
1319 
1320 		if (ret) {
1321 			dev_info(&pf->pdev->dev,
1322 				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1323 				 fd_data->pctype, ret);
1324 			err = true;
1325 		} else {
1326 			dev_info(&pf->pdev->dev,
1327 				 "Filter OK for PCTYPE %d (ret = %d)\n",
1328 				 fd_data->pctype, ret);
1329 		}
1330 	}
1331 
1332 	return err ? -EOPNOTSUPP : 0;
1333 }
1334 
1335 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
1336 /**
1337  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
1338  * a specific flow spec
1339  * @vsi: pointer to the targeted VSI
1340  * @fd_data: the flow director data required from the FDir descriptor
1341  * @ethtool_rx_flow_spec: the flow spec
1342  * @add: true adds a filter, false removes it
1343  *
1344  * Returns 0 if the filters were successfully added or removed
1345  **/
1346 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
1347 				   struct i40e_fdir_data *fd_data,
1348 				   struct ethtool_rx_flow_spec *fsp, bool add)
1349 {
1350 	struct i40e_pf *pf = vsi->back;
1351 	struct tcphdr *tcp;
1352 	struct iphdr *ip;
1353 	bool err = false;
1354 	int ret;
1355 	/* Dummy packet */
1356 	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1357 			 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
1358 			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1359 			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1360 			 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
1361 
1362 	memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
1363 
1364 	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1365 	tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1366 	      + sizeof(struct iphdr));
1367 
1368 	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1369 	tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1370 	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1371 	tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
1372 
1373 	if (add) {
1374 		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
1375 			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
1376 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
1377 		}
1378 	}
1379 
1380 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
1381 	ret = i40e_program_fdir_filter(fd_data, pf, add);
1382 
1383 	if (ret) {
1384 		dev_info(&pf->pdev->dev,
1385 			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1386 			 fd_data->pctype, ret);
1387 		err = true;
1388 	} else {
1389 		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1390 			 fd_data->pctype, ret);
1391 	}
1392 
1393 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
1394 
1395 	ret = i40e_program_fdir_filter(fd_data, pf, add);
1396 	if (ret) {
1397 		dev_info(&pf->pdev->dev,
1398 			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1399 			 fd_data->pctype, ret);
1400 		err = true;
1401 	} else {
1402 		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1403 			  fd_data->pctype, ret);
1404 	}
1405 
1406 	return err ? -EOPNOTSUPP : 0;
1407 }
1408 
1409 /**
1410  * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
1411  * a specific flow spec
1412  * @vsi: pointer to the targeted VSI
1413  * @fd_data: the flow director data required from the FDir descriptor
1414  * @ethtool_rx_flow_spec: the flow spec
1415  * @add: true adds a filter, false removes it
1416  *
1417  * Returns 0 if the filters were successfully added or removed
1418  **/
1419 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
1420 				    struct i40e_fdir_data *fd_data,
1421 				    struct ethtool_rx_flow_spec *fsp, bool add)
1422 {
1423 	return -EOPNOTSUPP;
1424 }
1425 
1426 #define I40E_IP_DUMMY_PACKET_LEN 34
1427 /**
1428  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
1429  * a specific flow spec
1430  * @vsi: pointer to the targeted VSI
1431  * @fd_data: the flow director data required for the FDir descriptor
1432  * @fsp: the ethtool flow spec
1433  * @add: true adds a filter, false removes it
1434  *
1435  * Returns 0 if the filters were successfully added or removed
1436  **/
1437 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
1438 				  struct i40e_fdir_data *fd_data,
1439 				  struct ethtool_rx_flow_spec *fsp, bool add)
1440 {
1441 	struct i40e_pf *pf = vsi->back;
1442 	struct iphdr *ip;
1443 	bool err = false;
1444 	int ret;
1445 	int i;
1446 	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1447 			 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
1448 			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1449 
1450 	memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
1451 	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1452 
1453 	ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
1454 	ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
1455 	ip->protocol = fsp->h_u.usr_ip4_spec.proto;
1456 
1457 	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
1458 	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
1459 		fd_data->pctype = i;
1460 		ret = i40e_program_fdir_filter(fd_data, pf, add);
1461 
1462 		if (ret) {
1463 			dev_info(&pf->pdev->dev,
1464 				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1465 				 fd_data->pctype, ret);
1466 			err = true;
1467 		} else {
1468 			dev_info(&pf->pdev->dev,
1469 				 "Filter OK for PCTYPE %d (ret = %d)\n",
1470 				 fd_data->pctype, ret);
1471 		}
1472 	}
1473 
1474 	return err ? -EOPNOTSUPP : 0;
1475 }
1476 
1477 /**
1478  * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
1479  * a specific flow spec based on their protocol
1480  * @vsi: pointer to the targeted VSI
1481  * @cmd: command to get or set RX flow classification rules
1482  * @add: true adds a filter, false removes it
1483  *
1484  * Returns 0 if the filters were successfully added or removed
1485  **/
1486 static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
1487 			struct ethtool_rxnfc *cmd, bool add)
1488 {
1489 	struct i40e_fdir_data fd_data;
1490 	int ret = -EINVAL;
1491 	struct i40e_pf *pf;
1492 	struct ethtool_rx_flow_spec *fsp =
1493 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1494 
1495 	if (!vsi)
1496 		return -EINVAL;
1497 
1498 	pf = vsi->back;
1499 
1500 	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
1501 	    (fsp->ring_cookie >= vsi->num_queue_pairs))
1502 		return -EINVAL;
1503 
1504 	/* Populate the Flow Director that we have at the moment
1505 	 * and allocate the raw packet buffer for the calling functions
1506 	 */
1507 	fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1508 				     GFP_KERNEL);
1509 
1510 	if (!fd_data.raw_packet) {
1511 		dev_info(&pf->pdev->dev, "Could not allocate memory\n");
1512 		return -ENOMEM;
1513 	}
1514 
1515 	fd_data.q_index = fsp->ring_cookie;
1516 	fd_data.flex_off = 0;
1517 	fd_data.pctype = 0;
1518 	fd_data.dest_vsi = vsi->id;
1519 	fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1520 	fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
1521 	fd_data.cnt_index = 0;
1522 	fd_data.fd_id = 0;
1523 
1524 	switch (fsp->flow_type & ~FLOW_EXT) {
1525 	case TCP_V4_FLOW:
1526 		ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
1527 		break;
1528 	case UDP_V4_FLOW:
1529 		ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
1530 		break;
1531 	case SCTP_V4_FLOW:
1532 		ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
1533 		break;
1534 	case IPV4_FLOW:
1535 		ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1536 		break;
1537 	case IP_USER_FLOW:
1538 		switch (fsp->h_u.usr_ip4_spec.proto) {
1539 		case IPPROTO_TCP:
1540 			ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
1541 			break;
1542 		case IPPROTO_UDP:
1543 			ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
1544 			break;
1545 		case IPPROTO_SCTP:
1546 			ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
1547 			break;
1548 		default:
1549 			ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1550 			break;
1551 		}
1552 		break;
1553 	default:
1554 		dev_info(&pf->pdev->dev, "Could not specify spec type\n");
1555 		ret = -EINVAL;
1556 	}
1557 
1558 	kfree(fd_data.raw_packet);
1559 	fd_data.raw_packet = NULL;
1560 
1561 	return ret;
1562 }
1563 
1564 /**
1565  * i40e_set_rxnfc - command to set RX flow classification rules
1566  * @netdev: network interface device structure
1567  * @cmd: ethtool rxnfc command
1568  *
1569  * Returns Success if the command is supported.
1570  **/
1571 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1572 {
1573 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1574 	struct i40e_vsi *vsi = np->vsi;
1575 	struct i40e_pf *pf = vsi->back;
1576 	int ret = -EOPNOTSUPP;
1577 
1578 	switch (cmd->cmd) {
1579 	case ETHTOOL_SRXFH:
1580 		ret = i40e_set_rss_hash_opt(pf, cmd);
1581 		break;
1582 	case ETHTOOL_SRXCLSRLINS:
1583 		ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
1584 		break;
1585 	case ETHTOOL_SRXCLSRLDEL:
1586 		ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
1587 		break;
1588 	default:
1589 		break;
1590 	}
1591 
1592 	return ret;
1593 }
1594 
1595 /**
1596  * i40e_max_channels - get Max number of combined channels supported
1597  * @vsi: vsi pointer
1598  **/
1599 static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
1600 {
1601 	/* TODO: This code assumes DCB and FD is disabled for now. */
1602 	return vsi->alloc_queue_pairs;
1603 }
1604 
1605 /**
1606  * i40e_get_channels - Get the current channels enabled and max supported etc.
1607  * @netdev: network interface device structure
1608  * @ch: ethtool channels structure
1609  *
1610  * We don't support separate tx and rx queues as channels. The other count
1611  * represents how many queues are being used for control. max_combined counts
1612  * how many queue pairs we can support. They may not be mapped 1 to 1 with
1613  * q_vectors since we support a lot more queue pairs than q_vectors.
1614  **/
1615 static void i40e_get_channels(struct net_device *dev,
1616 			       struct ethtool_channels *ch)
1617 {
1618 	struct i40e_netdev_priv *np = netdev_priv(dev);
1619 	struct i40e_vsi *vsi = np->vsi;
1620 	struct i40e_pf *pf = vsi->back;
1621 
1622 	/* report maximum channels */
1623 	ch->max_combined = i40e_max_channels(vsi);
1624 
1625 	/* report info for other vector */
1626 	ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
1627 	ch->max_other = ch->other_count;
1628 
1629 	/* Note: This code assumes DCB is disabled for now. */
1630 	ch->combined_count = vsi->num_queue_pairs;
1631 }
1632 
1633 /**
1634  * i40e_set_channels - Set the new channels count.
1635  * @netdev: network interface device structure
1636  * @ch: ethtool channels structure
1637  *
1638  * The new channels count may not be the same as requested by the user
1639  * since it gets rounded down to a power of 2 value.
1640  **/
1641 static int i40e_set_channels(struct net_device *dev,
1642 			      struct ethtool_channels *ch)
1643 {
1644 	struct i40e_netdev_priv *np = netdev_priv(dev);
1645 	unsigned int count = ch->combined_count;
1646 	struct i40e_vsi *vsi = np->vsi;
1647 	struct i40e_pf *pf = vsi->back;
1648 	int new_count;
1649 
1650 	/* We do not support setting channels for any other VSI at present */
1651 	if (vsi->type != I40E_VSI_MAIN)
1652 		return -EINVAL;
1653 
1654 	/* verify they are not requesting separate vectors */
1655 	if (!count || ch->rx_count || ch->tx_count)
1656 		return -EINVAL;
1657 
1658 	/* verify other_count has not changed */
1659 	if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
1660 		return -EINVAL;
1661 
1662 	/* verify the number of channels does not exceed hardware limits */
1663 	if (count > i40e_max_channels(vsi))
1664 		return -EINVAL;
1665 
1666 	/* update feature limits from largest to smallest supported values */
1667 	/* TODO: Flow director limit, DCB etc */
1668 
1669 	/* cap RSS limit */
1670 	if (count > pf->rss_size_max)
1671 		count = pf->rss_size_max;
1672 
1673 	/* use rss_reconfig to rebuild with new queue count and update traffic
1674 	 * class queue mapping
1675 	 */
1676 	new_count = i40e_reconfig_rss_queues(pf, count);
1677 	if (new_count > 0)
1678 		return 0;
1679 	else
1680 		return -EINVAL;
1681 }
1682 
/* ethtool callbacks supported by the i40e PF driver */
static const struct ethtool_ops i40e_ethtool_ops = {
	.get_settings		= i40e_get_settings,
	.get_drvinfo		= i40e_get_drvinfo,
	.get_regs_len		= i40e_get_regs_len,
	.get_regs		= i40e_get_regs,
	.nway_reset		= i40e_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= i40e_get_wol,
	.set_wol		= i40e_set_wol,
	.get_eeprom_len		= i40e_get_eeprom_len,
	.get_eeprom		= i40e_get_eeprom,
	.get_ringparam		= i40e_get_ringparam,
	.set_ringparam		= i40e_set_ringparam,
	.get_pauseparam		= i40e_get_pauseparam,
	.get_msglevel		= i40e_get_msglevel,
	.set_msglevel		= i40e_set_msglevel,
	.get_rxnfc		= i40e_get_rxnfc,
	.set_rxnfc		= i40e_set_rxnfc,
	.self_test		= i40e_diag_test,
	.get_strings		= i40e_get_strings,
	.set_phys_id		= i40e_set_phys_id,
	.get_sset_count		= i40e_get_sset_count,
	.get_ethtool_stats	= i40e_get_ethtool_stats,
	.get_coalesce		= i40e_get_coalesce,
	.set_coalesce		= i40e_set_coalesce,
	.get_channels		= i40e_get_channels,
	.set_channels		= i40e_set_channels,
	.get_ts_info		= i40e_get_ts_info,
};
1712 
/**
 * i40e_set_ethtool_ops - attach the i40e ethtool callbacks to a netdev
 * @netdev: network interface device structure
 **/
void i40e_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
}
1717