1 /*******************************************************************************
2 
3   Intel 82599 Virtual Function driver
4   Copyright(c) 1999 - 2015 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, see <http://www.gnu.org/licenses/>.
17 
18   The full GNU General Public License is included in this distribution in
19   the file called "COPYING".
20 
21   Contact Information:
22   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 
25 *******************************************************************************/
26 
27 /* ethtool support for ixgbevf */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/types.h>
32 #include <linux/module.h>
33 #include <linux/slab.h>
34 #include <linux/pci.h>
35 #include <linux/netdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/vmalloc.h>
38 #include <linux/if_vlan.h>
39 #include <linux/uaccess.h>
40 
41 #include "ixgbevf.h"
42 
43 #define IXGBE_ALL_RAR_ENTRIES 16
44 
45 struct ixgbe_stats {
46 	char stat_string[ETH_GSTRING_LEN];
47 	struct {
48 		int sizeof_stat;
49 		int stat_offset;
50 		int base_stat_offset;
51 		int saved_reset_offset;
52 	};
53 };
54 
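/* IXGBEVF_STAT entries are hardware counters tracked together with base and
 * saved-reset snapshots; IXGBEVF_ZSTAT entries are driver-maintained counters
 * and mark both snapshot offsets as unused (-1).
 */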
55 #define IXGBEVF_STAT(m, b, r) { \
56 	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
57 	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
58 	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
59 	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
60 }
61 
62 #define IXGBEVF_ZSTAT(m) { \
63 	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
64 	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
65 	.base_stat_offset = -1, \
66 	.saved_reset_offset = -1 \
67 }
68 
69 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
70 	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
71 				    stats.saved_reset_vfgprc)},
72 	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
73 				    stats.saved_reset_vfgptc)},
74 	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
75 				  stats.saved_reset_vfgorc)},
76 	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
77 				  stats.saved_reset_vfgotc)},
78 	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
79 	{"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
80 	{"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
81 	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
82 				   stats.saved_reset_vfmprc)},
83 	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
84 #ifdef BP_EXTENDED_STATS
85 	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
86 	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
87 	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
88 	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
89 	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
90 	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
91 #endif
92 };
93 
94 #define IXGBE_QUEUE_STATS_LEN 0
95 #define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
96 
97 #define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
98 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
99 	"Register test  (offline)",
100 	"Link test   (on/offline)"
101 };
102 
103 #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
104 
105 static int ixgbevf_get_settings(struct net_device *netdev,
106 				struct ethtool_cmd *ecmd)
107 {
108 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
109 	struct ixgbe_hw *hw = &adapter->hw;
110 	u32 link_speed = 0;
111 	bool link_up;
112 
113 	ecmd->supported = SUPPORTED_10000baseT_Full;
114 	ecmd->autoneg = AUTONEG_DISABLE;
115 	ecmd->transceiver = XCVR_DUMMY1;
116 	ecmd->port = -1;
117 
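	/* ask check_link() to read the current link state rather than
	 * report a cached status
	 */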
118 	hw->mac.get_link_status = 1;
119 	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
120 
121 	if (link_up) {
122 		__u32 speed = SPEED_10000;
123 
124 		switch (link_speed) {
125 		case IXGBE_LINK_SPEED_10GB_FULL:
126 			speed = SPEED_10000;
127 			break;
128 		case IXGBE_LINK_SPEED_1GB_FULL:
129 			speed = SPEED_1000;
130 			break;
131 		case IXGBE_LINK_SPEED_100_FULL:
132 			speed = SPEED_100;
133 			break;
134 		}
135 
136 		ethtool_cmd_speed_set(ecmd, speed);
137 		ecmd->duplex = DUPLEX_FULL;
138 	} else {
139 		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
140 		ecmd->duplex = DUPLEX_UNKNOWN;
141 	}
142 
143 	return 0;
144 }
145 
146 static u32 ixgbevf_get_msglevel(struct net_device *netdev)
147 {
148 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
149 
150 	return adapter->msg_enable;
151 }
152 
153 static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
154 {
155 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
156 
157 	adapter->msg_enable = data;
158 }
159 
160 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
161 
162 static int ixgbevf_get_regs_len(struct net_device *netdev)
163 {
164 #define IXGBE_REGS_LEN 45
165 	return IXGBE_REGS_LEN * sizeof(u32);
166 }
167 
168 static void ixgbevf_get_regs(struct net_device *netdev,
169 			     struct ethtool_regs *regs,
170 			     void *p)
171 {
172 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
173 	struct ixgbe_hw *hw = &adapter->hw;
174 	u32 *regs_buff = p;
175 	u32 regs_len = ixgbevf_get_regs_len(netdev);
176 	u8 i;
177 
178 	memset(p, 0, regs_len);
179 
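	/* dump version: format 1 in the top byte, then the PCI revision ID
	 * and device ID
	 */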
180 	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
181 
182 	/* General Registers */
183 	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
184 	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
185 	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
186 	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
187 	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
188 
189 	/* Interrupt */
	/* don't read EICR because reading it can clear interrupt causes;
	 * read EICS instead, which is a shadow of EICR and doesn't clear it
	 */
193 	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
194 	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
195 	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
196 	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
197 	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
198 	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
199 	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
200 	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
201 	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
202 
203 	/* Receive DMA */
204 	for (i = 0; i < 2; i++)
205 		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
206 	for (i = 0; i < 2; i++)
207 		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
208 	for (i = 0; i < 2; i++)
209 		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
210 	for (i = 0; i < 2; i++)
211 		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
212 	for (i = 0; i < 2; i++)
213 		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
214 	for (i = 0; i < 2; i++)
215 		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
216 	for (i = 0; i < 2; i++)
217 		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
218 
219 	/* Receive */
220 	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
221 
222 	/* Transmit */
223 	for (i = 0; i < 2; i++)
224 		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
225 	for (i = 0; i < 2; i++)
226 		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
227 	for (i = 0; i < 2; i++)
228 		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
229 	for (i = 0; i < 2; i++)
230 		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
231 	for (i = 0; i < 2; i++)
232 		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
233 	for (i = 0; i < 2; i++)
234 		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
235 	for (i = 0; i < 2; i++)
236 		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
237 	for (i = 0; i < 2; i++)
238 		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
239 }
240 
241 static void ixgbevf_get_drvinfo(struct net_device *netdev,
242 				struct ethtool_drvinfo *drvinfo)
243 {
244 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
245 
246 	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
247 	strlcpy(drvinfo->version, ixgbevf_driver_version,
248 		sizeof(drvinfo->version));
249 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
250 		sizeof(drvinfo->bus_info));
251 }
252 
253 static void ixgbevf_get_ringparam(struct net_device *netdev,
254 				  struct ethtool_ringparam *ring)
255 {
256 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
257 
258 	ring->rx_max_pending = IXGBEVF_MAX_RXD;
259 	ring->tx_max_pending = IXGBEVF_MAX_TXD;
260 	ring->rx_pending = adapter->rx_ring_count;
261 	ring->tx_pending = adapter->tx_ring_count;
262 }
263 
264 static int ixgbevf_set_ringparam(struct net_device *netdev,
265 				 struct ethtool_ringparam *ring)
266 {
267 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
268 	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
269 	u32 new_rx_count, new_tx_count;
270 	int i, err = 0;
271 
272 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
273 		return -EINVAL;
274 
275 	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
276 	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
277 	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
278 
279 	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
280 	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
281 	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
282 
	/* if there is nothing to do, return success */
284 	if ((new_tx_count == adapter->tx_ring_count) &&
285 	    (new_rx_count == adapter->rx_ring_count))
286 		return 0;
287 
288 	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
289 		usleep_range(1000, 2000);
290 
291 	if (!netif_running(adapter->netdev)) {
292 		for (i = 0; i < adapter->num_tx_queues; i++)
293 			adapter->tx_ring[i]->count = new_tx_count;
294 		for (i = 0; i < adapter->num_rx_queues; i++)
295 			adapter->rx_ring[i]->count = new_rx_count;
296 		adapter->tx_ring_count = new_tx_count;
297 		adapter->rx_ring_count = new_rx_count;
298 		goto clear_reset;
299 	}
300 
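	/* allocate the replacement rings before touching the live ones so a
	 * failed allocation leaves the running configuration untouched
	 */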
301 	if (new_tx_count != adapter->tx_ring_count) {
302 		tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
303 		if (!tx_ring) {
304 			err = -ENOMEM;
305 			goto clear_reset;
306 		}
307 
308 		for (i = 0; i < adapter->num_tx_queues; i++) {
309 			/* clone ring and setup updated count */
310 			tx_ring[i] = *adapter->tx_ring[i];
311 			tx_ring[i].count = new_tx_count;
312 			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
313 			if (err) {
314 				while (i) {
315 					i--;
316 					ixgbevf_free_tx_resources(&tx_ring[i]);
317 				}
318 
319 				vfree(tx_ring);
320 				tx_ring = NULL;
321 
322 				goto clear_reset;
323 			}
324 		}
325 	}
326 
327 	if (new_rx_count != adapter->rx_ring_count) {
328 		rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
329 		if (!rx_ring) {
330 			err = -ENOMEM;
331 			goto clear_reset;
332 		}
333 
334 		for (i = 0; i < adapter->num_rx_queues; i++) {
335 			/* clone ring and setup updated count */
336 			rx_ring[i] = *adapter->rx_ring[i];
337 			rx_ring[i].count = new_rx_count;
338 			err = ixgbevf_setup_rx_resources(&rx_ring[i]);
339 			if (err) {
340 				while (i) {
341 					i--;
342 					ixgbevf_free_rx_resources(&rx_ring[i]);
343 				}
344 
345 				vfree(rx_ring);
346 				rx_ring = NULL;
347 
348 				goto clear_reset;
349 			}
350 		}
351 	}
352 
353 	/* bring interface down to prepare for update */
354 	ixgbevf_down(adapter);
355 
356 	/* Tx */
357 	if (tx_ring) {
358 		for (i = 0; i < adapter->num_tx_queues; i++) {
359 			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
360 			*adapter->tx_ring[i] = tx_ring[i];
361 		}
362 		adapter->tx_ring_count = new_tx_count;
363 
364 		vfree(tx_ring);
365 		tx_ring = NULL;
366 	}
367 
368 	/* Rx */
369 	if (rx_ring) {
370 		for (i = 0; i < adapter->num_rx_queues; i++) {
371 			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
372 			*adapter->rx_ring[i] = rx_ring[i];
373 		}
374 		adapter->rx_ring_count = new_rx_count;
375 
376 		vfree(rx_ring);
377 		rx_ring = NULL;
378 	}
379 
380 	/* restore interface using new values */
381 	ixgbevf_up(adapter);
382 
383 clear_reset:
384 	/* free Tx resources if Rx error is encountered */
385 	if (tx_ring) {
386 		for (i = 0; i < adapter->num_tx_queues; i++)
387 			ixgbevf_free_tx_resources(&tx_ring[i]);
388 		vfree(tx_ring);
389 	}
390 
391 	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
392 	return err;
393 }
394 
395 static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
396 {
397 	switch (stringset) {
398 	case ETH_SS_TEST:
399 		return IXGBE_TEST_LEN;
400 	case ETH_SS_STATS:
401 		return IXGBE_GLOBAL_STATS_LEN;
402 	default:
403 		return -EINVAL;
404 	}
405 }
406 
407 static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
408 				      struct ethtool_stats *stats, u64 *data)
409 {
410 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
411 	char *base = (char *)adapter;
412 	int i;
413 #ifdef BP_EXTENDED_STATS
414 	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
415 	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
416 
417 	for (i = 0; i < adapter->num_rx_queues; i++) {
418 		rx_yields += adapter->rx_ring[i]->stats.yields;
419 		rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
		rx_missed += adapter->rx_ring[i]->stats.misses;
421 	}
422 
423 	for (i = 0; i < adapter->num_tx_queues; i++) {
424 		tx_yields += adapter->tx_ring[i]->stats.yields;
425 		tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
		tx_missed += adapter->tx_ring[i]->stats.misses;
427 	}
428 
429 	adapter->bp_rx_yields = rx_yields;
430 	adapter->bp_rx_cleaned = rx_cleaned;
431 	adapter->bp_rx_missed = rx_missed;
432 
433 	adapter->bp_tx_yields = tx_yields;
434 	adapter->bp_tx_cleaned = tx_cleaned;
435 	adapter->bp_tx_missed = tx_missed;
436 #endif
437 
438 	ixgbevf_update_stats(adapter);
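	/* hardware counters are reported as current - base + saved_reset so
	 * the value keeps accumulating across VF resets; counters without
	 * snapshots (offset -1) are reported as-is
	 */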
439 	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
440 		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
441 		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
442 		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
443 
444 		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
445 			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
446 				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
447 			else
448 				data[i] = *(u64 *)p;
449 		} else {
450 			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
451 				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
452 			else
453 				data[i] = *(u32 *)p;
454 		}
455 	}
456 }
457 
458 static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
459 				u8 *data)
460 {
461 	char *p = (char *)data;
462 	int i;
463 
464 	switch (stringset) {
465 	case ETH_SS_TEST:
466 		memcpy(data, *ixgbe_gstrings_test,
467 		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
468 		break;
469 	case ETH_SS_STATS:
470 		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
471 			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
472 			       ETH_GSTRING_LEN);
473 			p += ETH_GSTRING_LEN;
474 		}
475 		break;
476 	}
477 }
478 
479 static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
480 {
481 	struct ixgbe_hw *hw = &adapter->hw;
482 	bool link_up;
	u32 link_speed = 0;

	*data = 0;
485 
486 	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
487 	if (!link_up)
488 		*data = 1;
489 
490 	return *data;
491 }
492 
493 /* ethtool register test data */
494 struct ixgbevf_reg_test {
495 	u16 reg;
496 	u8  array_len;
497 	u8  test_type;
498 	u32 mask;
499 	u32 write;
500 };
501 
502 /* In the hardware, registers are laid out either singly, in arrays
503  * spaced 0x40 bytes apart, or in contiguous tables.  We assume
504  * most tests take place on arrays or single registers (handled
505  * as a single-element array) and special-case the tables.
506  * Table tests are always pattern tests.
507  *
508  * We also make provision for some required setup steps by specifying
509  * registers to be written without any read-back testing.
510  */
511 
512 #define PATTERN_TEST	1
513 #define SET_READ_TEST	2
514 #define WRITE_NO_TEST	3
515 #define TABLE32_TEST	4
516 #define TABLE64_TEST_LO	5
517 #define TABLE64_TEST_HI	6
518 
519 /* default VF register test */
520 static const struct ixgbevf_reg_test reg_test_vf[] = {
521 	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
522 	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
523 	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
524 	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
525 	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
526 	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
527 	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
528 	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
529 	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
530 	{ .reg = 0 }
531 };
532 
533 static const u32 register_test_patterns[] = {
534 	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
535 };
536 
537 static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
538 			     int reg, u32 mask, u32 write)
539 {
540 	u32 pat, val, before;
541 
542 	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
543 		*data = 1;
544 		return true;
545 	}
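	/* write each pattern masked by 'write', verify the masked read-back,
	 * and restore the original register value either way
	 */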
546 	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
547 		before = ixgbevf_read_reg(&adapter->hw, reg);
548 		ixgbe_write_reg(&adapter->hw, reg,
549 				register_test_patterns[pat] & write);
550 		val = ixgbevf_read_reg(&adapter->hw, reg);
551 		if (val != (register_test_patterns[pat] & write & mask)) {
552 			hw_dbg(&adapter->hw,
553 			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
554 			       reg, val,
555 			       register_test_patterns[pat] & write & mask);
556 			*data = reg;
557 			ixgbe_write_reg(&adapter->hw, reg, before);
558 			return true;
559 		}
560 		ixgbe_write_reg(&adapter->hw, reg, before);
561 	}
562 	return false;
563 }
564 
565 static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
566 			      int reg, u32 mask, u32 write)
567 {
568 	u32 val, before;
569 
570 	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
571 		*data = 1;
572 		return true;
573 	}
574 	before = ixgbevf_read_reg(&adapter->hw, reg);
575 	ixgbe_write_reg(&adapter->hw, reg, write & mask);
576 	val = ixgbevf_read_reg(&adapter->hw, reg);
577 	if ((write & mask) != (val & mask)) {
578 		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
579 		       reg, (val & mask), write & mask);
580 		*data = reg;
581 		ixgbe_write_reg(&adapter->hw, reg, before);
582 		return true;
583 	}
584 	ixgbe_write_reg(&adapter->hw, reg, before);
585 	return false;
586 }
587 
588 static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
589 {
590 	const struct ixgbevf_reg_test *test;
591 	u32 i;
592 
593 	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
594 		dev_err(&adapter->pdev->dev,
595 			"Adapter removed - register test blocked\n");
596 		*data = 1;
597 		return 1;
598 	}
599 	test = reg_test_vf;
600 
601 	/* Perform the register test, looping through the test table
602 	 * until we either fail or reach the null entry.
603 	 */
604 	while (test->reg) {
605 		for (i = 0; i < test->array_len; i++) {
606 			bool b = false;
607 
608 			switch (test->test_type) {
609 			case PATTERN_TEST:
610 				b = reg_pattern_test(adapter, data,
611 						     test->reg + (i * 0x40),
612 						     test->mask,
613 						     test->write);
614 				break;
615 			case SET_READ_TEST:
616 				b = reg_set_and_check(adapter, data,
617 						      test->reg + (i * 0x40),
618 						      test->mask,
619 						      test->write);
620 				break;
621 			case WRITE_NO_TEST:
622 				ixgbe_write_reg(&adapter->hw,
623 						test->reg + (i * 0x40),
624 						test->write);
625 				break;
626 			case TABLE32_TEST:
627 				b = reg_pattern_test(adapter, data,
628 						     test->reg + (i * 4),
629 						     test->mask,
630 						     test->write);
631 				break;
632 			case TABLE64_TEST_LO:
633 				b = reg_pattern_test(adapter, data,
634 						     test->reg + (i * 8),
635 						     test->mask,
636 						     test->write);
637 				break;
638 			case TABLE64_TEST_HI:
639 				b = reg_pattern_test(adapter, data,
640 						     test->reg + 4 + (i * 8),
641 						     test->mask,
642 						     test->write);
643 				break;
644 			}
645 			if (b)
646 				return 1;
647 		}
648 		test++;
649 	}
650 
651 	*data = 0;
652 	return *data;
653 }
654 
655 static void ixgbevf_diag_test(struct net_device *netdev,
656 			      struct ethtool_test *eth_test, u64 *data)
657 {
658 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
659 	bool if_running = netif_running(netdev);
660 
661 	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
662 		dev_err(&adapter->pdev->dev,
663 			"Adapter removed - test blocked\n");
664 		data[0] = 1;
665 		data[1] = 1;
666 		eth_test->flags |= ETH_TEST_FL_FAILED;
667 		return;
668 	}
669 	set_bit(__IXGBEVF_TESTING, &adapter->state);
670 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
671 		/* Offline tests */
672 
673 		hw_dbg(&adapter->hw, "offline testing starting\n");
674 
		/* run the link test before the hardware reset so autoneg
		 * doesn't interfere with the test result
		 */
678 		if (ixgbevf_link_test(adapter, &data[1]))
679 			eth_test->flags |= ETH_TEST_FL_FAILED;
680 
681 		if (if_running)
682 			/* indicate we're in test mode */
683 			dev_close(netdev);
684 		else
685 			ixgbevf_reset(adapter);
686 
687 		hw_dbg(&adapter->hw, "register testing starting\n");
688 		if (ixgbevf_reg_test(adapter, &data[0]))
689 			eth_test->flags |= ETH_TEST_FL_FAILED;
690 
691 		ixgbevf_reset(adapter);
692 
693 		clear_bit(__IXGBEVF_TESTING, &adapter->state);
694 		if (if_running)
695 			dev_open(netdev);
696 	} else {
697 		hw_dbg(&adapter->hw, "online testing starting\n");
698 		/* Online tests */
699 		if (ixgbevf_link_test(adapter, &data[1]))
700 			eth_test->flags |= ETH_TEST_FL_FAILED;
701 
702 		/* Online tests aren't run; pass by default */
703 		data[0] = 0;
704 
705 		clear_bit(__IXGBEVF_TESTING, &adapter->state);
706 	}
707 	msleep_interruptible(4 * 1000);
708 }
709 
710 static int ixgbevf_nway_reset(struct net_device *netdev)
711 {
712 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
713 
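	/* the VF has no autonegotiation to restart; just reinitialize the
	 * interface if it is running
	 */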
714 	if (netif_running(netdev))
715 		ixgbevf_reinit_locked(adapter);
716 
717 	return 0;
718 }
719 
720 static int ixgbevf_get_coalesce(struct net_device *netdev,
721 				struct ethtool_coalesce *ec)
722 {
723 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
724 
725 	/* only valid if in constant ITR mode */
726 	if (adapter->rx_itr_setting <= 1)
727 		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
728 	else
729 		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
730 
731 	/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
732 	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
733 		return 0;
734 
735 	/* only valid if in constant ITR mode */
736 	if (adapter->tx_itr_setting <= 1)
737 		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
738 	else
739 		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
740 
741 	return 0;
742 }
743 
744 static int ixgbevf_set_coalesce(struct net_device *netdev,
745 				struct ethtool_coalesce *ec)
746 {
747 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
748 	struct ixgbevf_q_vector *q_vector;
749 	int num_vectors, i;
750 	u16 tx_itr_param, rx_itr_param;
751 
	/* don't accept Tx-specific changes if we have mixed Rx/Tx vectors */
753 	if (adapter->q_vector[0]->tx.count &&
754 	    adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
755 		return -EINVAL;
756 
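	/* ITR settings above 1 store the interval as usecs << 2, so the
	 * largest accepted value is IXGBE_MAX_EITR >> 2 usecs
	 */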
757 	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
758 	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
759 		return -EINVAL;
760 
761 	if (ec->rx_coalesce_usecs > 1)
762 		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
763 	else
764 		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
765 
766 	if (adapter->rx_itr_setting == 1)
767 		rx_itr_param = IXGBE_20K_ITR;
768 	else
769 		rx_itr_param = adapter->rx_itr_setting;
770 
771 	if (ec->tx_coalesce_usecs > 1)
772 		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
773 	else
774 		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
775 
776 	if (adapter->tx_itr_setting == 1)
777 		tx_itr_param = IXGBE_10K_ITR;
778 	else
779 		tx_itr_param = adapter->tx_itr_setting;
780 
781 	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
782 
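	/* program the new interval into every queue vector; vectors that
	 * handle both Rx and Tx use the Rx value
	 */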
783 	for (i = 0; i < num_vectors; i++) {
784 		q_vector = adapter->q_vector[i];
785 		if (q_vector->tx.count && !q_vector->rx.count)
786 			/* Tx only */
787 			q_vector->itr = tx_itr_param;
788 		else
789 			/* Rx only or mixed */
790 			q_vector->itr = rx_itr_param;
791 		ixgbevf_write_eitr(q_vector);
792 	}
793 
794 	return 0;
795 }
796 
797 static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
798 			     u32 *rules __always_unused)
799 {
800 	struct ixgbevf_adapter *adapter = netdev_priv(dev);
801 
802 	switch (info->cmd) {
803 	case ETHTOOL_GRXRINGS:
804 		info->data = adapter->num_rx_queues;
805 		return 0;
806 	default:
807 		hw_dbg(&adapter->hw, "Command parameters not supported\n");
808 		return -EOPNOTSUPP;
809 	}
810 }
811 
812 static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
813 {
814 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
815 
816 	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
817 		return IXGBEVF_X550_VFRETA_SIZE;
818 
819 	return IXGBEVF_82599_RETA_SIZE;
820 }
821 
822 static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
823 {
824 	return IXGBEVF_RSS_HASH_KEY_SIZE;
825 }
826 
827 static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
828 			    u8 *hfunc)
829 {
830 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
831 	int err = 0;
832 
833 	if (hfunc)
834 		*hfunc = ETH_RSS_HASH_TOP;
835 
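	/* X550-class VFs keep a local copy of the RSS key and redirection
	 * table; older VFs have to query them from the PF over the mailbox
	 */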
836 	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
837 		if (key)
838 			memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
839 
840 		if (indir) {
841 			int i;
842 
843 			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
844 				indir[i] = adapter->rss_indir_tbl[i];
845 		}
846 	} else {
		/* if neither the indirection table nor the hash key was
		 * requested, return success without taking any locks
		 */
850 		if (!indir && !key)
851 			return 0;
852 
853 		spin_lock_bh(&adapter->mbx_lock);
854 		if (indir)
855 			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
856 						      adapter->num_rx_queues);
857 
858 		if (!err && key)
859 			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
860 
861 		spin_unlock_bh(&adapter->mbx_lock);
862 	}
863 
864 	return err;
865 }
866 
867 static const struct ethtool_ops ixgbevf_ethtool_ops = {
868 	.get_settings		= ixgbevf_get_settings,
869 	.get_drvinfo		= ixgbevf_get_drvinfo,
870 	.get_regs_len		= ixgbevf_get_regs_len,
871 	.get_regs		= ixgbevf_get_regs,
872 	.nway_reset		= ixgbevf_nway_reset,
873 	.get_link		= ethtool_op_get_link,
874 	.get_ringparam		= ixgbevf_get_ringparam,
875 	.set_ringparam		= ixgbevf_set_ringparam,
876 	.get_msglevel		= ixgbevf_get_msglevel,
877 	.set_msglevel		= ixgbevf_set_msglevel,
878 	.self_test		= ixgbevf_diag_test,
879 	.get_sset_count		= ixgbevf_get_sset_count,
880 	.get_strings		= ixgbevf_get_strings,
881 	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
882 	.get_coalesce		= ixgbevf_get_coalesce,
883 	.set_coalesce		= ixgbevf_set_coalesce,
884 	.get_rxnfc		= ixgbevf_get_rxnfc,
885 	.get_rxfh_indir_size	= ixgbevf_get_rxfh_indir_size,
886 	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
887 	.get_rxfh		= ixgbevf_get_rxfh,
888 };
889 
890 void ixgbevf_set_ethtool_ops(struct net_device *netdev)
891 {
892 	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
893 }
894