// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, gve_driver_name, sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

/* For the following stats column string names, make sure the order
 * matches how they are filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx and xdp_redirect, make sure the order also matches
 * the order declared in enum xdp_action in uapi/linux/bpf.h.
 */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
	"rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

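/* The per-queue string tables below are printf-style templates:
 * gve_get_strings() expands each "[%u]" with the queue index via snprintf(),
 * so every queue gets its own copy of each counter name.
 */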
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
	"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
	"rx_frag_alloc_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
	"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

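/* ethtool core sizes its string buffer from gve_get_sset_count(); every name
 * occupies a fixed ETH_GSTRING_LEN slot, which is why the cursor below always
 * advances by ETH_GSTRING_LEN (or by a whole table's size) rather than by
 * strlen().
 */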
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	char *s = (char *)data;
	int num_tx_queues;
	int i, j;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(s, *gve_gstrings_main_stats,
		       sizeof(gve_gstrings_main_stats));
		s += sizeof(gve_gstrings_main_stats);

		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_rx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < num_tx_queues; i++) {
			for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_tx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		memcpy(s, *gve_gstrings_adminq_stats,
		       sizeof(gve_gstrings_adminq_stats));
		s += sizeof(gve_gstrings_adminq_stats);
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(s, *gve_gstrings_priv_flags,
		       sizeof(gve_gstrings_priv_flags));
		s += sizeof(gve_gstrings_priv_flags);
		break;

	default:
		break;
	}
}

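/* The count returned here must match both the number of strings emitted by
 * gve_get_strings() and the number of u64 values written by
 * gve_get_ethtool_stats(), since ethtool core allocates its buffers from it.
 */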
static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (num_tx_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
		tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
		rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int num_tx_queues;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	num_tx_queues = gve_num_tx_queues(priv);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
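	/* Per-ring counters are read under a u64_stats seqcount retry loop:
	 * if the datapath updates a ring's stats while we are reading them,
	 * u64_stats_fetch_retry() notices and the snapshot is re-read, so the
	 * 64-bit counters stay consistent even on 32-bit kernels.
	 */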
	for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
	     rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < num_tx_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

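	/* Per the index arithmetic below, the shared stats_report buffer is
	 * laid out as: driver tx stats, driver rx stats, NIC-written rx
	 * stats, NIC-written tx stats. A zero stat_name means the NIC has
	 * not filled in its portion yet, so those columns are skipped.
	 */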
	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			data[i++] = rx->rx_frag_alloc_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
			} else {
				stats_idx = rx_qid_to_stats_idx[ring];
				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP rx counters */
			do {
				start = u64_stats_fetch_begin(&priv->rx[ring].statss);
				for (j = 0; j < GVE_XDP_ACTIONS; j++)
					data[i + j] = rx->xdp_actions[j];
				data[i + j++] = rx->xdp_tx_errors;
				data[i + j++] = rx->xdp_redirect_errors;
				data[i + j++] = rx->xdp_alloc_fails;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts.
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
			} else {
				stats_idx = tx_qid_to_stats_idx[ring];
				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);
					data[i++] = value;
				}
			}
			/* XDP xsk counters */
			data[i++] = tx->xdp_xsk_wakeup;
			data[i++] = tx->xdp_xsk_done;
			do {
				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
				data[i] = tx->xdp_xsk_sent;
				data[i + 1] = tx->xdp_xmit;
				data[i + 2] = tx->xdp_xmit_errors;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			i += 3; /* XDP tx counters */
		}
	} else {
		i += num_tx_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ Stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
}

static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

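/* Queue counts are changed from userspace with, e.g.:
 *   ethtool -L <dev> rx 8 tx 8
 * If the carrier is down the new configuration is only recorded; otherwise
 * gve_adjust_queues() applies it to the live device.
 */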
static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (priv->num_xdp_queues &&
	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
		return -EINVAL;
	}

	if (!netif_carrier_ok(netdev)) {
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->rx_desc_cnt;
	cmd->tx_max_pending = priv->tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;
}

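/* Only a full device reset is supported, i.e.:
 *   ethtool --reset <dev> all
 * Any other flag combination returns -EOPNOTSUPP.
 */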
static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

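/* rx-copybreak is the size cutoff (in bytes) below which received packets
 * are copied into a freshly allocated skb rather than handing the DMA
 * buffer up the stack. Tuned from userspace with, e.g.:
 *   ethtool --set-tunable <dev> rx-copybreak 128
 */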
static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = gve_is_gqi(priv) ?
			(PAGE_SIZE / 2) : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

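/* The report-stats private flag arms a periodic timer that asks the device
 * to refresh the shared stats report. Toggled from userspace with, e.g.:
 *   ethtool --set-priv-flags <dev> report-stats on
 */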
static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* Start the report-stats timer when the user turns report-stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
					msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero out gve stats when report-stats is turned off and delete the
	 * report-stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			num_tx_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
				   sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (priv->link_speed == 0)
		err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;

	cmd->base.duplex = DUPLEX_FULL;

	return err;
}

static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;
	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}

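/* Interrupt coalescing is only tunable on the DQO descriptor format; GQI
 * returns -EOPNOTSUPP. Set from userspace with, e.g.:
 *   ethtool -C <dev> rx-usecs 20 tx-usecs 20
 * Values are capped at GVE_MAX_ITR_INTERVAL_DQO.
 */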
static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;
	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}

const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings
};