/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/vmalloc.h>

#include "fm10k.h"

struct fm10k_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define FM10K_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
	FM10K_NETDEV_STAT(tx_packets),
	FM10K_NETDEV_STAT(tx_bytes),
	FM10K_NETDEV_STAT(tx_errors),
	FM10K_NETDEV_STAT(rx_packets),
	FM10K_NETDEV_STAT(rx_bytes),
	FM10K_NETDEV_STAT(rx_errors),
	FM10K_NETDEV_STAT(rx_dropped),

	/* detailed Rx errors */
	FM10K_NETDEV_STAT(rx_length_errors),
	FM10K_NETDEV_STAT(rx_crc_errors),
	FM10K_NETDEV_STAT(rx_fifo_errors),
};

#define FM10K_NETDEV_STATS_LEN	ARRAY_SIZE(fm10k_gstrings_net_stats)

#define FM10K_STAT(_name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \
	.stat_offset = offsetof(struct fm10k_intfc, _stat) \
}

static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
	FM10K_STAT("tx_restart_queue", restart_queue),
	FM10K_STAT("tx_busy", tx_busy),
	FM10K_STAT("tx_csum_errors", tx_csum_errors),
	FM10K_STAT("rx_alloc_failed", alloc_failed),
	FM10K_STAT("rx_csum_errors", rx_csum_errors),

	FM10K_STAT("tx_packets_nic", tx_packets_nic),
	FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
	FM10K_STAT("rx_packets_nic", rx_packets_nic),
	FM10K_STAT("rx_bytes_nic", rx_bytes_nic),
	FM10K_STAT("rx_drops_nic", rx_drops_nic),
	FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
	FM10K_STAT("rx_overrun_vf", rx_overrun_vf),

	FM10K_STAT("swapi_status", hw.swapi.status),
	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),

	FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy),
	FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped),
	FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages),
	FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords),
	FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages),
	FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords),
	FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err),

	FM10K_STAT("tx_hang_count", tx_timeout_count),

	FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
};

static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
	FM10K_STAT("timeout", stats.timeout.count),
	FM10K_STAT("ur", stats.ur.count),
	FM10K_STAT("ca", stats.ca.count),
	FM10K_STAT("um", stats.um.count),
	FM10K_STAT("xec", stats.xec.count),
	FM10K_STAT("vlan_drop", stats.vlan_drop.count),
	FM10K_STAT("loopback_drop", stats.loopback_drop.count),
	FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};

#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)

#define FM10K_QUEUE_STATS_LEN(_n) \
	((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))

#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
				FM10K_NETDEV_STATS_LEN)

static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
	"Mailbox test (on/offline)"
};

#define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN)

enum fm10k_self_test_types {
	FM10K_TEST_MBX,
	FM10K_TEST_MAX = FM10K_TEST_LEN
};

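/* Copy the requested string set (self-test names or statistics labels,
 * including the per-queue labels) into the ethtool-supplied buffer.
 */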
static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *fm10k_gstrings_test,
		       FM10K_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
			memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
			memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		if (interface->hw.mac.type != fm10k_mac_vf)
			for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
				memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
				       ETH_GSTRING_LEN);
				p += ETH_GSTRING_LEN;
			}

		for (i = 0; i < interface->hw.mac.max_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

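/* Report how many entries each string set contains so that ethtool can
 * size its buffers before calling get_strings/get_ethtool_stats.
 */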
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int stats_len = FM10K_STATIC_STATS_LEN;

	switch (sset) {
	case ETH_SS_TEST:
		return FM10K_TEST_LEN;
	case ETH_SS_STATS:
		stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues);

		if (hw->mac.type != fm10k_mac_vf)
			stats_len += FM10K_PF_STATS_LEN;

		return stats_len;
	default:
		return -EOPNOTSUPP;
	}
}

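/* Gather statistics in the same order as the strings reported above:
 * netdev stats, driver-global stats, PF-only stats, then per-queue Tx/Rx
 * counters (zero-filled for rings that are not allocated).
 */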
static void fm10k_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats __always_unused *stats,
				    u64 *data)
{
	const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	char *p;
	int i, j;

	fm10k_update_stats(interface);

	for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
		p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset;
		*(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
		p = (char *)interface +
		    fm10k_gstrings_global_stats[i].stat_offset;
		*(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	if (interface->hw.mac.type != fm10k_mac_vf)
		for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
			p = (char *)interface +
			    fm10k_gstrings_pf_stats[i].stat_offset;
			*(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		struct fm10k_ring *ring;
		u64 *queue_stat;

		ring = interface->tx_ring[i];
		if (ring)
			queue_stat = (u64 *)&ring->stats;
		for (j = 0; j < stat_count; j++)
			*(data++) = ring ? queue_stat[j] : 0;

		ring = interface->rx_ring[i];
		if (ring)
			queue_stat = (u64 *)&ring->stats;
		for (j = 0; j < stat_count; j++)
			*(data++) = ring ? queue_stat[j] : 0;
	}
}

/* If function below adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_Q 29

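/* Dump the per-queue register block for queue i; the number of reads here
 * must stay in sync with FM10K_REGS_LEN_Q above.
 */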
static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0;

	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i));

	BUG_ON(idx != FM10K_REGS_LEN_Q);
}

/* If function above adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_VSI 43

static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0, j;

	buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i));
	for (j = 0; j < 10; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j));
	for (j = 0; j < 32; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j));

	BUG_ON(idx != FM10K_REGS_LEN_VSI);
}

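/* Fill the ethtool register dump.  The layout, and therefore the length
 * reported by fm10k_get_regs_len() below, differs between the PF and VF
 * MAC types.
 */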
static void fm10k_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u32 *buff = p;
	u16 i;

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		/* General PF Registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT);

		for (i = 0; i < 8; i++) {
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i));
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i));
		}

		for (i = 0; i < 65; i++) {
			fm10k_get_reg_vsi(hw, buff, i);
			buff += FM10K_REGS_LEN_VSI;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2);

		for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
			fm10k_get_reg_q(hw, buff, i);
			buff += FM10K_REGS_LEN_Q;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL);

		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i));

		/* Interrupt Throttling Registers */
		for (i = 0; i < 130; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_ITR(i));

		break;
	case fm10k_mac_vf:
		/* General VF registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME);

		/* Interrupt Throttling Registers */
		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i));

		fm10k_get_reg_vsi(hw, buff, 0);
		buff += FM10K_REGS_LEN_VSI;

		for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) {
			if (i < hw->mac.max_queues)
				fm10k_get_reg_q(hw, buff, i);
			else
				memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q);
			buff += FM10K_REGS_LEN_Q;
		}

		break;
	default:
		return;
	}
}

/* If function above adds more registers these defines need to be updated */
#define FM10K_REGS_LEN_PF \
(162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q))
#define FM10K_REGS_LEN_VF \
(11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q))

static int fm10k_get_regs_len(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		return FM10K_REGS_LEN_PF * sizeof(u32);
	case fm10k_mac_vf:
		return FM10K_REGS_LEN_VF * sizeof(u32);
	default:
		return 0;
	}
}

static void fm10k_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	strncpy(info->driver, fm10k_driver_name,
		sizeof(info->driver) - 1);
	strncpy(info->version, fm10k_driver_version,
		sizeof(info->version) - 1);
	strncpy(info->bus_info, pci_name(interface->pdev),
		sizeof(info->bus_info) - 1);

	info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS);

	info->regdump_len = fm10k_get_regs_len(dev);
}

static void fm10k_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* record fixed values for autoneg and tx pause */
	pause->autoneg = 0;
	pause->tx_pause = 1;

	pause->rx_pause = interface->rx_pause ? 1 : 0;
}

static int fm10k_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	if (pause->autoneg || !pause->tx_pause)
		return -EINVAL;

	/* we can only support pause on the PF to avoid head-of-line blocking */
	if (hw->mac.type == fm10k_mac_pf)
		interface->rx_pause = pause->rx_pause ? ~0 : 0;
	else if (pause->rx_pause)
		return -EINVAL;

	if (netif_running(dev))
		fm10k_update_rx_drop_en(interface);

	return 0;
}

static u32 fm10k_get_msglevel(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	return interface->msg_enable;
}

static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	interface->msg_enable = data;
}

static void fm10k_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	ring->rx_max_pending = FM10K_MAX_RXD;
	ring->tx_max_pending = FM10K_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = interface->rx_ring_count;
	ring->tx_pending = interface->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

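/* Resize the Tx/Rx descriptor rings.  If the interface is running, the new
 * rings are built in a temporary buffer first so the old rings remain
 * usable if allocation fails.
 */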
static int fm10k_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       FM10K_MIN_TXD, FM10K_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       FM10K_MIN_RXD, FM10K_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == interface->tx_ring_count) &&
	    (new_rx_count == interface->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	if (!netif_running(interface->netdev)) {
		for (i = 0; i < interface->num_tx_queues; i++)
			interface->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < interface->num_rx_queues; i++)
			interface->rx_ring[i]->count = new_rx_count;
		interface->tx_ring_count = new_tx_count;
		interface->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct fm10k_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	fm10k_down(interface);

	/* Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != interface->tx_ring_count) {
		for (i = 0; i < interface->num_tx_queues; i++) {
			memcpy(&temp_ring[i], interface->tx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_tx_count;
			err = fm10k_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_tx_queues; i++) {
			fm10k_free_tx_resources(interface->tx_ring[i]);

			memcpy(interface->tx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != interface->rx_ring_count) {
		for (i = 0; i < interface->num_rx_queues; i++) {
			memcpy(&temp_ring[i], interface->rx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_rx_count;
			err = fm10k_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_rx_queues; i++) {
			fm10k_free_rx_resources(interface->rx_ring[i]);

			memcpy(interface->rx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->rx_ring_count = new_rx_count;
	}

err_setup:
	fm10k_up(interface);
	vfree(temp_ring);
clear_reset:
	clear_bit(__FM10K_RESETTING, &interface->state);
	return err;
}

static int fm10k_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	ec->use_adaptive_tx_coalesce =
		!!(interface->tx_itr & FM10K_ITR_ADAPTIVE);
	ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE;

	ec->use_adaptive_rx_coalesce =
		!!(interface->rx_itr & FM10K_ITR_ADAPTIVE);
	ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE;

	return 0;
}

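/* Apply interrupt moderation settings; adaptive mode is encoded by setting
 * the FM10K_ITR_ADAPTIVE flag alongside an initial ITR value.
 */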
static int fm10k_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_q_vector *qv;
	u16 tx_itr, rx_itr;
	int i;

	/* verify limits */
	if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) ||
	    (ec->tx_coalesce_usecs > FM10K_ITR_MAX))
		return -EINVAL;

	/* record settings */
	tx_itr = ec->tx_coalesce_usecs;
	rx_itr = ec->rx_coalesce_usecs;

	/* set initial values for adaptive ITR */
	if (ec->use_adaptive_tx_coalesce)
		tx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_10K;

	if (ec->use_adaptive_rx_coalesce)
		rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K;

	/* update interface */
	interface->tx_itr = tx_itr;
	interface->rx_itr = rx_itr;

	/* update q_vectors */
	for (i = 0; i < interface->num_q_vectors; i++) {
		qv = interface->q_vector[i];
		qv->tx.itr = tx_itr;
		qv->rx.itr = rx_itr;
	}

	return 0;
}

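/* Report which packet fields are hashed for each flow type.  The switch
 * cases intentionally fall through so the TCP/UDP cases also pick up the
 * IP src/dst fields.
 */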
static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on fm10k */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V4_FLOW:
		if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 __always_unused *rule_locs)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = interface->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = fm10k_get_rss_hash_opts(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

#define UDP_RSS_FLAGS (FM10K_FLAG_RSS_FIELD_IPV4_UDP | \
		       FM10K_FLAG_RSS_FIELD_IPV6_UDP)
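/* Update the RSS hash field selection.  Only UDP L4 hashing is
 * configurable; all other flow types must keep their default fields.
 */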
static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags = interface->flags;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~FM10K_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= FM10K_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~FM10K_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= FM10K_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags != interface->flags) {
		struct fm10k_hw *hw = &interface->hw;
		u32 mrqc;

		if ((flags & UDP_RSS_FLAGS) &&
		    !(interface->flags & UDP_RSS_FLAGS))
			netif_warn(interface, drv, interface->netdev,
				   "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		interface->flags = flags;

		/* Perform hash on these packet types */
		mrqc = FM10K_MRQC_IPV4 |
		       FM10K_MRQC_TCP_IPV4 |
		       FM10K_MRQC_IPV6 |
		       FM10K_MRQC_TCP_IPV6;

		if (flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
			mrqc |= FM10K_MRQC_UDP_IPV4;
		if (flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
			mrqc |= FM10K_MRQC_UDP_IPV6;

		fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
	}

	return 0;
}

static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = fm10k_set_rss_hash_opt(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

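/* Exercise the mailbox by sending a test message for each attribute type
 * and waiting up to one second for each response.
 */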
static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 attr_flag, test_msg[6];
	unsigned long timeout;
	int err;

	/* For now this is a VF only feature */
	if (hw->mac.type != fm10k_mac_vf)
		return 0;

	/* loop through both nested and unnested attribute types */
	for (attr_flag = (1 << FM10K_TEST_MSG_UNSET);
	     attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED));
	     attr_flag += attr_flag) {
		/* generate message to be tested */
		fm10k_tlv_msg_test_create(test_msg, attr_flag);

		fm10k_mbx_lock(interface);
		mbx->test_result = FM10K_NOT_IMPLEMENTED;
		err = mbx->ops.enqueue_tx(hw, mbx, test_msg);
		fm10k_mbx_unlock(interface);

		/* wait up to 1 second for response */
		timeout = jiffies + HZ;
		do {
			if (err < 0)
				goto err_out;

			usleep_range(500, 1000);

			fm10k_mbx_lock(interface);
			mbx->ops.process(hw, mbx);
			fm10k_mbx_unlock(interface);

			err = mbx->test_result;
			if (!err)
				break;
		} while (time_is_after_jiffies(timeout));

		/* reporting errors */
		if (err)
			goto err_out;
	}

err_out:
	*data = err < 0 ? (attr_flag) : (err > 0);
	return err;
}

static void fm10k_self_test(struct net_device *dev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);

	if (FM10K_REMOVED(hw)) {
		netif_err(interface, drv, dev,
			  "Interface removed - test blocked\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX]))
		eth_test->flags |= ETH_TEST_FL_FAILED;
}

static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
	return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}

static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	if (!indir)
		return 0;

	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
		u32 reta = interface->reta[i];

		indir[0] = (reta << 24) >> 24;
		indir[1] = (reta << 16) >> 24;
		indir[2] = (reta <<  8) >> 24;
		indir[3] = (reta) >> 24;
	}

	return 0;
}

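/* Write a new RSS redirection table; each 32-bit RETA register packs four
 * 8-bit queue indices, so entries are validated and stored in groups of
 * four.
 */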
static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u16 rss_i;

	if (!indir)
		return 0;

	/* Verify user input. */
	rss_i = interface->ring_feature[RING_F_RSS].indices;
	for (i = fm10k_get_reta_size(netdev); i--;) {
		if (indir[i] < rss_i)
			continue;
		return -EINVAL;
	}

	/* record entries to reta table */
	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
		u32 reta = indir[0] |
			   (indir[1] << 8) |
			   (indir[2] << 16) |
			   (indir[3] << 24);

		if (interface->reta[i] == reta)
			continue;

		interface->reta[i] = reta;
		fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
	}

	return 0;
}

static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
{
	return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}

static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i, err;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	err = fm10k_get_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4)
		*(__le32 *)key = cpu_to_le32(interface->rssrk[i]);

	return 0;
}

static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	int i, err;

	/* We do not allow change in unsupported parameters */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	err = fm10k_set_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
		u32 rssrk = le32_to_cpu(*(__le32 *)key);

		if (interface->rssrk[i] == rssrk)
			continue;

		interface->rssrk[i] = rssrk;
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk);
	}

	return 0;
}

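/* Largest number of combined channels we can expose; with multiple traffic
 * classes this is the largest power of two that fits per TC.
 */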
static unsigned int fm10k_max_channels(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int max_combined = interface->hw.mac.max_queues;
	u8 tcs = netdev_get_num_tc(dev);

	/* For QoS report channels per traffic class */
	if (tcs > 1)
		max_combined = 1 << (fls(max_combined / tcs) - 1);

	return max_combined;
}

static void fm10k_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	/* report maximum channels */
	ch->max_combined = fm10k_max_channels(dev);

	/* report info for other vector */
	ch->max_other = NON_Q_VECTORS(hw);
	ch->other_count = ch->max_other;

	/* record RSS queues */
	ch->combined_count = interface->ring_feature[RING_F_RSS].indices;
}

static int fm10k_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	struct fm10k_hw *hw = &interface->hw;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS(hw))
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > fm10k_max_channels(dev))
		return -EINVAL;

	interface->ring_feature[RING_F_RSS].limit = count;

	/* use setup TC to update any traffic class queue mapping */
	return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}

static int fm10k_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	if (interface->ptp_clock)
		info->phc_index = ptp_clock_index(interface->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops fm10k_ethtool_ops = {
	.get_strings		= fm10k_get_strings,
	.get_sset_count		= fm10k_get_sset_count,
	.get_ethtool_stats      = fm10k_get_ethtool_stats,
	.get_drvinfo		= fm10k_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= fm10k_get_pauseparam,
	.set_pauseparam		= fm10k_set_pauseparam,
	.get_msglevel		= fm10k_get_msglevel,
	.set_msglevel		= fm10k_set_msglevel,
	.get_ringparam		= fm10k_get_ringparam,
	.set_ringparam		= fm10k_set_ringparam,
	.get_coalesce		= fm10k_get_coalesce,
	.set_coalesce		= fm10k_set_coalesce,
	.get_rxnfc		= fm10k_get_rxnfc,
	.set_rxnfc		= fm10k_set_rxnfc,
	.get_regs               = fm10k_get_regs,
	.get_regs_len           = fm10k_get_regs_len,
	.self_test		= fm10k_self_test,
	.get_rxfh_indir_size	= fm10k_get_reta_size,
	.get_rxfh_key_size	= fm10k_get_rssrk_size,
	.get_rxfh		= fm10k_get_rssh,
	.set_rxfh		= fm10k_set_rssh,
	.get_channels		= fm10k_get_channels,
	.set_channels		= fm10k_set_channels,
	.get_ts_info            = fm10k_get_ts_info,
};

void fm10k_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fm10k_ethtool_ops;
}