/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "ena_netdev.h"

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
	ENA_STAT_GLOBAL_ENTRY(suspend),
	ENA_STAT_GLOBAL_ENTRY(resume),
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(interface_up),
	ENA_STAT_GLOBAL_ENTRY(interface_down),
	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(queue_stop),
	ENA_STAT_TX_ENTRY(queue_wakeup),
	ENA_STAT_TX_ENTRY(dma_mapping_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(napi_comp),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(llq_buffer_copy),
	ENA_STAT_TX_ENTRY(missed_tx),
	ENA_STAT_TX_ENTRY(unmask_interrupt),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
	ENA_STAT_RX_ENTRY(csum_good),
	ENA_STAT_RX_ENTRY(refil_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(page_alloc_fail),
	ENA_STAT_RX_ENTRY(skb_alloc_fail),
	ENA_STAT_RX_ENTRY(dma_mapping_err),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
	ENA_STAT_RX_ENTRY(empty_rx_ring),
	ENA_STAT_RX_ENTRY(csum_unchecked),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
	ENA_STAT_ENA_COM_ENTRY(out_of_space),
	ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)

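/* Copy one u64 statistic under the writer's u64_stats_sync seqcount.
 * The fetch/retry loop guarantees a consistent (non-torn) read on
 * 32-bit architectures where 64-bit loads are not atomic.
 */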
static void ena_safe_update_stat(u64 *src, u64 *dst,
				 struct u64_stats_sync *syncp)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*(dst) = *src;
	} while (u64_stats_fetch_retry_irq(syncp, start));
}

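/* Walk every IO queue and append its Tx and then Rx counters to the
 * ethtool data array, advancing the caller's cursor as it goes.
 */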
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	struct ena_ring *ring;

	u64 *ptr;
	int i, j;

	for (i = 0; i < adapter->num_io_queues; i++) {
		/* Tx stats */
		ring = &adapter->tx_ring[i];

		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			ptr = (u64 *)((uintptr_t)&ring->tx_stats +
				(uintptr_t)ena_stats->stat_offset);

			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
		}

		/* Rx stats */
		ring = &adapter->rx_ring[i];

		for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
			ena_stats = &ena_stats_rx_strings[j];

			ptr = (u64 *)((uintptr_t)&ring->rx_stats +
				(uintptr_t)ena_stats->stat_offset);

			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
		}
	}
}

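/* Admin queue counters are plain u32 values maintained by ena_com, so
 * they are copied directly without seqcount protection.
 */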
static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	u32 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
			(uintptr_t)ena_stats->stat_offset);

		*(*data)++ = *ptr;
	}
}

static void ena_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	const struct ena_stats *ena_stats;
	u64 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];

		ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
			(uintptr_t)ena_stats->stat_offset);

		ena_safe_update_stat(ptr, data++, &adapter->syncp);
	}

	ena_queue_stats(adapter, &data);
	ena_dev_admin_queue_stats(adapter, &data);
}

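/* Total number of stats strings: per-queue Tx and Rx counters for every
 * IO queue, plus the global device counters and the admin queue counters.
 * Must stay in sync with ena_get_strings() and ena_get_ethtool_stats().
 */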
int ena_get_sset_count(struct net_device *netdev, int sset)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
		+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}

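/* Emit the per-queue stat names in the same order ena_queue_stats() fills
 * the values: "queue_<i>_tx_<stat>" followed by "queue_<i>_rx_<stat>".
 */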
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
	const struct ena_stats *ena_stats;
	int i, j;

	for (i = 0; i < adapter->num_io_queues; i++) {
		/* Tx stats */
		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			snprintf(*data, ETH_GSTRING_LEN,
				 "queue_%u_tx_%s", i, ena_stats->name);
			(*data) += ETH_GSTRING_LEN;
		}
		/* Rx stats */
		for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
			ena_stats = &ena_stats_rx_strings[j];

			snprintf(*data, ETH_GSTRING_LEN,
				 "queue_%u_rx_%s", i, ena_stats->name);
			(*data) += ETH_GSTRING_LEN;
		}
	}
}

static void ena_com_dev_strings(u8 **data)
{
	const struct ena_stats *ena_stats;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		snprintf(*data, ETH_GSTRING_LEN,
			 "ena_admin_q_%s", ena_stats->name);
		(*data) += ETH_GSTRING_LEN;
	}
}

static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	const struct ena_stats *ena_stats;
	int i;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];
		memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	ena_queue_strings(adapter, &data);
	ena_com_dev_strings(&data);
}

static int ena_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_admin_get_feature_link_desc *link;
	struct ena_admin_get_feat_resp feat_resp;
	int rc;

	rc = ena_com_get_link_params(ena_dev, &feat_resp);
	if (rc)
		return rc;

	link = &feat_resp.u.link;
	link_ksettings->base.speed = link->speed;

	if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);
	}

	link_ksettings->base.autoneg =
		(link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	link_ksettings->base.duplex = DUPLEX_FULL;

	return 0;
}

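/* Report the non-adaptive Tx/Rx moderation intervals in microseconds
 * (device units scaled by intr_delay_resolution) and whether adaptive Rx
 * moderation is currently enabled.
 */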
static int ena_get_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;

	if (!ena_com_interrupt_moderation_supported(ena_dev))
		return -EOPNOTSUPP;

	coalesce->tx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
			ena_dev->intr_delay_resolution;

	coalesce->rx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) *
			ena_dev->intr_delay_resolution;

	coalesce->use_adaptive_rx_coalesce =
		ena_com_get_adaptive_moderation_enabled(ena_dev);

	return 0;
}

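/* Push the current non-adaptive Tx moderation interval from ena_com into
 * every Tx ring; the Rx variant below does the same for the Rx rings.
 */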
static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->tx_ring[i].smoothed_interval = val;
}

static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].smoothed_interval = val;
}

static int ena_set_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	if (!ena_com_interrupt_moderation_supported(ena_dev))
		return -EOPNOTSUPP;

	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
								coalesce->tx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_tx_rings_nonadaptive_intr_moderation(adapter);

	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
								coalesce->rx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_rx_rings_nonadaptive_intr_moderation(adapter);

	if (coalesce->use_adaptive_rx_coalesce &&
	    !ena_com_get_adaptive_moderation_enabled(ena_dev))
		ena_com_enable_adaptive_moderation(ena_dev);

	if (!coalesce->use_adaptive_rx_coalesce &&
	    ena_com_get_adaptive_moderation_enabled(ena_dev))
		ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

static u32 ena_get_msglevel(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ena_set_msglevel(struct net_device *netdev, u32 value)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = value;
}

static void ena_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static void ena_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	ring->tx_max_pending = adapter->max_tx_ring_size;
	ring->rx_max_pending = adapter->max_rx_ring_size;
	ring->tx_pending = adapter->tx_ring[0].ring_size;
	ring->rx_pending = adapter->rx_ring[0].ring_size;
}

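/* Clamp the requested ring sizes to ENA_MIN_RING_SIZE, round them down to
 * a power of two, and only rebuild the queues if the result differs from
 * what is already configured.
 */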
static int ena_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_size, new_rx_size;

	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
			ENA_MIN_RING_SIZE : ring->tx_pending;
	new_tx_size = rounddown_pow_of_two(new_tx_size);

	new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
			ENA_MIN_RING_SIZE : ring->rx_pending;
	new_rx_size = rounddown_pow_of_two(new_rx_size);

	if (new_tx_size == adapter->requested_tx_ring_size &&
	    new_rx_size == adapter->requested_rx_ring_size)
		return 0;

	return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
}

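/* Translate the device's RSS hash-field flags into the ethtool RXH_* bit
 * mask reported through ETHTOOL_GRXFH; ena_flow_data_to_flow_hash() below
 * performs the inverse mapping for ETHTOOL_SRXFH.
 */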
static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
{
	u32 data = 0;

	if (hash_fields & ENA_ADMIN_RSS_L2_DA)
		data |= RXH_L2DA;

	if (hash_fields & ENA_ADMIN_RSS_L3_DA)
		data |= RXH_IP_DST;

	if (hash_fields & ENA_ADMIN_RSS_L3_SA)
		data |= RXH_IP_SRC;

	if (hash_fields & ENA_ADMIN_RSS_L4_DP)
		data |= RXH_L4_B_2_3;

	if (hash_fields & ENA_ADMIN_RSS_L4_SP)
		data |= RXH_L4_B_0_1;

	return data;
}

static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
{
	u16 data = 0;

	if (hash_fields & RXH_L2DA)
		data |= ENA_ADMIN_RSS_L2_DA;

	if (hash_fields & RXH_IP_DST)
		data |= ENA_ADMIN_RSS_L3_DA;

	if (hash_fields & RXH_IP_SRC)
		data |= ENA_ADMIN_RSS_L3_SA;

	if (hash_fields & RXH_L4_B_2_3)
		data |= ENA_ADMIN_RSS_L4_DP;

	if (hash_fields & RXH_L4_B_0_1)
		data |= ENA_ADMIN_RSS_L4_SP;

	return data;
}

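/* ETHTOOL_GRXFH handler: map the ethtool flow type to the matching ENA
 * hash protocol and report which packet fields currently feed its RSS
 * hash. Flow types the device cannot hash on return -EOPNOTSUPP.
 */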
static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;
	int rc;

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
	if (rc)
		return rc;

	cmd->data = ena_flow_hash_to_flow_type(hash_fields);

	return 0;
}

static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	hash_fields = ena_flow_data_to_flow_hash(cmd->data);

	return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}

static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		rc = ena_set_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
	case ETHTOOL_SRXCLSRLINS:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
			 u32 *rules)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_io_queues;
		rc = 0;
		break;
	case ETHTOOL_GRXFH:
		rc = ena_get_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
{
	return ENA_RX_RSS_TABLE_SIZE;
}

static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
	return ENA_HASH_KEY_SIZE;
}

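/* Program the RSS indirection table: each table entry maps a hash bucket
 * to the user's combined queue index, converted to the device's Rx queue
 * numbering via ENA_IO_RXQ_IDX() before being written to the device.
 */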
static int ena_indirection_table_set(struct ena_adapter *adapter,
				     const u32 *indir)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int i, rc;

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev,
						       i,
						       ENA_IO_RXQ_IDX(indir[i]));
		if (unlikely(rc)) {
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot fill indirect table (index is too large)\n");
			return rc;
		}
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (rc) {
		netif_err(adapter, drv, adapter->netdev,
			  "Cannot set indirect table\n");
		return rc == -EPERM ? -EOPNOTSUPP : rc;
	}
	return rc;
}

static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int i, rc;

	if (!indir)
		return 0;

	rc = ena_com_indirect_table_get(ena_dev, indir);
	if (rc)
		return rc;

	/* Our internal representation of the indices is: even indices
	 * for Tx and odd indices for Rx. We need to convert the Rx
	 * indices to be consecutive.
	 */
	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
		indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);

	return rc;
}

static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	enum ena_admin_hash_functions ena_func;
	u8 func;
	int rc;

	rc = ena_indirection_table_get(adapter, indir);
	if (rc)
		return rc;

	/* We call this function in order to check if the device
	 * supports getting/setting the hash function.
	 */
	rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			rc = 0;

		return rc;
	}

	rc = ena_com_get_hash_key(adapter->ena_dev, key);
	if (rc)
		return rc;

	switch (ena_func) {
	case ENA_ADMIN_TOEPLITZ:
		func = ETH_RSS_HASH_TOP;
		break;
	case ENA_ADMIN_CRC32:
		func = ETH_RSS_HASH_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter is not supported\n");
		return -EOPNOTSUPP;
	}

	if (hfunc)
		*hfunc = func;

	return 0;
}

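/* ethtool -X handler: optionally reprogram the indirection table, then map
 * the requested ethtool hash function onto the device's hash function and
 * update the hash key through ena_com_fill_hash_function().
 */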
static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	enum ena_admin_hash_functions func = 0;
	int rc;

	if (indir) {
		rc = ena_indirection_table_set(adapter, indir);
		if (rc)
			return rc;
	}

	switch (hfunc) {
	case ETH_RSS_HASH_NO_CHANGE:
		func = ena_com_get_current_hash_function(ena_dev);
		break;
	case ETH_RSS_HASH_TOP:
		func = ENA_ADMIN_TOEPLITZ;
		break;
	case ETH_RSS_HASH_CRC32:
		func = ENA_ADMIN_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
			  hfunc);
		return -EOPNOTSUPP;
	}

	if (key || func) {
		rc = ena_com_fill_hash_function(ena_dev, func, key,
						ENA_HASH_KEY_SIZE,
						0xFFFFFFFF);
		if (unlikely(rc)) {
			netif_err(adapter, drv, netdev, "Cannot fill key\n");
			return rc == -EPERM ? -EOPNOTSUPP : rc;
		}
	}

	return 0;
}

static void ena_get_channels(struct net_device *netdev,
			     struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	channels->max_combined = adapter->max_num_io_queues;
	channels->combined_count = adapter->num_io_queues;
}

static int ena_set_channels(struct net_device *netdev,
			    struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 count = channels->combined_count;

	/* The check for max value is already done in ethtool */
	if (count < ENA_MIN_NUM_IO_QUEUES ||
	    (ena_xdp_present(adapter) &&
	    !ena_xdp_legal_queue_count(adapter, count)))
		return -EINVAL;

	return ena_update_queue_count(adapter, count);
}

static int ena_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna, void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = adapter->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ena_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna,
			   const void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;
	u32 len;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		len = *(u32 *)data;
		if (len > adapter->netdev->mtu) {
			ret = -EINVAL;
			break;
		}
		adapter->rx_copybreak = len;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops ena_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_link_ksettings	= ena_get_link_ksettings,
	.get_drvinfo		= ena_get_drvinfo,
	.get_msglevel		= ena_get_msglevel,
	.set_msglevel		= ena_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= ena_get_coalesce,
	.set_coalesce		= ena_set_coalesce,
	.get_ringparam		= ena_get_ringparam,
	.set_ringparam		= ena_set_ringparam,
	.get_sset_count		= ena_get_sset_count,
	.get_strings		= ena_get_strings,
	.get_ethtool_stats	= ena_get_ethtool_stats,
	.get_rxnfc		= ena_get_rxnfc,
	.set_rxnfc		= ena_set_rxnfc,
	.get_rxfh_indir_size	= ena_get_rxfh_indir_size,
	.get_rxfh_key_size	= ena_get_rxfh_key_size,
	.get_rxfh		= ena_get_rxfh,
	.set_rxfh		= ena_set_rxfh,
	.get_channels		= ena_get_channels,
	.set_channels		= ena_set_channels,
	.get_tunable		= ena_get_tunable,
	.set_tunable		= ena_set_tunable,
	.get_ts_info		= ethtool_op_get_ts_info,
};

void ena_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ena_ethtool_ops;
}

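/* Collect all ethtool stat names and values and either format them into
 * the supplied buffer ("<name> <value>\n" per line) or, when buf is NULL,
 * log them to dmesg. Called by ena_dump_stats_to_buf() and
 * ena_dump_stats_to_dmesg() below.
 */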
static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
{
	struct net_device *netdev = adapter->netdev;
	u8 *strings_buf;
	u64 *data_buf;
	int strings_num;
	int i, rc;

	strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
	if (strings_num <= 0) {
		netif_err(adapter, drv, netdev, "Can't get stats num\n");
		return;
	}

	strings_buf = devm_kcalloc(&adapter->pdev->dev,
				   ETH_GSTRING_LEN, strings_num,
				   GFP_ATOMIC);
	if (!strings_buf) {
		netif_err(adapter, drv, netdev,
			  "Failed to allocate strings_buf\n");
		return;
	}

	data_buf = devm_kcalloc(&adapter->pdev->dev,
				strings_num, sizeof(u64),
				GFP_ATOMIC);
	if (!data_buf) {
		netif_err(adapter, drv, netdev,
			  "Failed to allocate data_buf\n");
		devm_kfree(&adapter->pdev->dev, strings_buf);
		return;
	}

	ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
	ena_get_ethtool_stats(netdev, NULL, data_buf);

	/* If there is a buffer, dump stats, otherwise print them to dmesg */
	if (buf)
		for (i = 0; i < strings_num; i++) {
			rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
				      "%s %llu\n",
				      strings_buf + i * ETH_GSTRING_LEN,
				      data_buf[i]);
			buf += rc;
		}
	else
		for (i = 0; i < strings_num; i++)
			netif_err(adapter, drv, netdev, "%s: %llu\n",
				  strings_buf + i * ETH_GSTRING_LEN,
				  data_buf[i]);

	devm_kfree(&adapter->pdev->dev, strings_buf);
	devm_kfree(&adapter->pdev->dev, data_buf);
}

void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
{
	if (!buf)
		return;

	ena_dump_stats_ex(adapter, buf);
}

void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
{
	ena_dump_stats_ex(adapter, NULL);
}