/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/events.h"
#include "en.h"
#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"

#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool/helpers.h>
#endif

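/* Number of stats groups for this profile; 0 when the profile does not
 * implement the stats_grps_num callback.
 */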
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

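/* Total number of counters across all stats groups, e.g. for sizing the
 * ethtool data and string buffers.
 */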
unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

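/* Refresh only the groups that opt in via MLX5E_NDO_UPDATE_STATS (the
 * ndo_get_stats64 path); groups are walked in reverse order.
 */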
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}

/* Concrete NIC Stats */

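/* Software counters aggregated from the per-ring/per-channel stats below.
 * Entries guarded by #ifdef exist only when the matching feature is built in.
 */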
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
#ifdef CONFIG_MLX5_EN_ARFS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_add) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_in) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_out) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_expired) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

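/* ethtool group callbacks for the software counters: report the count, the
 * string names and the values, in matching order.
 */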
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

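/* Helpers that fold per-queue counters into the aggregated software stats.
 * Each one handles a single queue type (XDP-redirect SQ, RQ XDP SQ, XSK
 * SQ/RQ, regular RQ, channel, regular SQ).
 */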
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops  += xdpsq_red_stats->nops;
	s->tx_xdp_full  += xdpsq_red_stats->full;
	s->tx_xdp_err   += xdpsq_red_stats->err;
	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
	s->rx_xdp_tx_full  += xdpsq_stats->full;
	s->rx_xdp_tx_err   += xdpsq_stats->err;
	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit  += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full  += xsksq_stats->full;
	s->tx_xsk_err   += xsksq_stats->err;
	s->tx_xsk_cqes  += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets                += xskrq_stats->packets;
	s->rx_xsk_bytes                  += xskrq_stats->bytes;
	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets                 += rq_stats->packets;
	s->rx_bytes                   += rq_stats->bytes;
	s->rx_lro_packets             += rq_stats->lro_packets;
	s->rx_lro_bytes               += rq_stats->lro_bytes;
	s->rx_gro_packets             += rq_stats->gro_packets;
	s->rx_gro_bytes               += rq_stats->gro_bytes;
	s->rx_gro_skbs                += rq_stats->gro_skbs;
	s->rx_gro_match_packets       += rq_stats->gro_match_packets;
	s->rx_gro_large_hds           += rq_stats->gro_large_hds;
	s->rx_ecn_mark                += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
	s->rx_csum_none               += rq_stats->csum_none;
	s->rx_csum_complete           += rq_stats->csum_complete;
	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop                += rq_stats->xdp_drop;
	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
	s->rx_wqe_err                 += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
	s->rx_congst_umr              += rq_stats->congst_umr;
#ifdef CONFIG_MLX5_EN_ARFS
	s->rx_arfs_add                += rq_stats->arfs_add;
	s->rx_arfs_request_in         += rq_stats->arfs_request_in;
	s->rx_arfs_request_out        += rq_stats->arfs_request_out;
	s->rx_arfs_expired            += rq_stats->arfs_expired;
	s->rx_arfs_err                += rq_stats->arfs_err;
#endif
	s->rx_recover                 += rq_stats->recover;
#ifdef CONFIG_PAGE_POOL_STATS
	s->rx_pp_alloc_fast            += rq_stats->pp_alloc_fast;
	s->rx_pp_alloc_slow            += rq_stats->pp_alloc_slow;
	s->rx_pp_alloc_empty           += rq_stats->pp_alloc_empty;
	s->rx_pp_alloc_refill          += rq_stats->pp_alloc_refill;
	s->rx_pp_alloc_waive           += rq_stats->pp_alloc_waive;
	s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
	s->rx_pp_recycle_cached        += rq_stats->pp_recycle_cached;
	s->rx_pp_recycle_cache_full    += rq_stats->pp_recycle_cache_full;
	s->rx_pp_recycle_ring          += rq_stats->pp_recycle_ring;
	s->rx_pp_recycle_ring_full     += rq_stats->pp_recycle_ring_full;
	s->rx_pp_recycle_released_ref  += rq_stats->pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
	s->rx_tls_err                 += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events      += ch_stats->events;
	s->ch_poll        += ch_stats->poll;
	s->ch_arm         += ch_stats->arm;
	s->ch_aff_change  += ch_stats->aff_change;
	s->ch_force_irq   += ch_stats->force_irq;
	s->ch_eq_rearm    += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets               += sq_stats->packets;
	s->tx_bytes                 += sq_stats->bytes;
	s->tx_tso_packets           += sq_stats->tso_packets;
	s->tx_tso_bytes             += sq_stats->tso_bytes;
	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
	s->tx_nop                   += sq_stats->nop;
	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped         += sq_stats->stopped;
	s->tx_queue_wake            += sq_stats->wake;
	s->tx_queue_dropped         += sq_stats->dropped;
	s->tx_cqe_err               += sq_stats->cqe_err;
	s->tx_recover               += sq_stats->recover;
	s->tx_xmit_more             += sq_stats->xmit_more;
	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
	s->tx_csum_none             += sq_stats->csum_none;
	s->tx_csum_partial          += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo               += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes                  += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

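/* page_pool_get_stats() returns absolute totals for the pool, so the per-RQ
 * counters below are assigned rather than accumulated.
 */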
#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
#else
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
}
#endif

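/* Recompute the aggregated software stats from scratch across all channels
 * that ever had stats allocated (stats_nch), plus PTP and HTB QoS queues.
 */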
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];

		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

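/* Device queue (Q) counters, read via the QUERY_Q_COUNTER command; the drop
 * RQ has its own counter set.
 */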
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

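/* vNIC environment counters, read via QUERY_VNIC_ENV; each counter is
 * exposed only when the matching capability bit is set.
 */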
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

static const struct counter_desc vnic_env_stats_drop_desc[] = {
	{ "rx_oversize_pkts_buffer",
		VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
	 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_drop_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_drop_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

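/* vport counters (Ethernet and RDMA traffic classes), read via
 * QUERY_VPORT_COUNTER.
 */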
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

static const struct counter_desc vport_loopback_stats_desc[] = {
	{ "vport_loopback_packets",
		VPORT_COUNTER_OFF(local_loopback.packets) },
	{ "vport_loopback_bytes",
		VPORT_COUNTER_OFF(local_loopback.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
#define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
	 ARRAY_SIZE(vport_loopback_stats_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS +
		NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vport_loopback_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_loopback_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

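/* Basic PPCNT groups are assumed supported when the device has no PCAM
 * register to report otherwise.
 */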
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

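/* Read one 64-bit big-endian PPCNT counter straight from a raw register
 * dump, addressed by counter set and field name.
 */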
#define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			      counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
			      eth_802_3_cntrs_grp_data_layout,		\
			      name)

	mac_stats->FramesTransmittedOK	= RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK	= RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK	= RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK	= RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors	= RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors	= RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

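/* Packet size buckets matching the RFC 2819 histogram counters reported
 * through ethtool's rmon interface.
 */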
static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  8191 },
	{ 8192, 10239 },
	{}
};

void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name)						\
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
			      eth_2819_cntrs_grp_data_layout,	\
			      name)

	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
	rmon->fragments		= RD(ether_stats_fragments);
	rmon->jabbers		= RD(ether_stats_jabbers);

	rmon->hist[0]		= RD(ether_stats_pkts64octets);
	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}

#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_get_link_ext_stats(struct net_device *dev,
			      struct ethtool_link_ext_stats *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(priv->mdev, in, sz, out,
			     MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);

	stats->link_down_events = MLX5_GET(ppcnt_reg, out,
					   counter_set.phys_layer_cntrs.link_down_events);
}

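/* FEC statistics: Firecode counters are reported per lane, while the RS and
 * LL-RS modes report corrected/uncorrectable block totals only.
 */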
fec_num_lanes(struct mlx5_core_dev * dev)1282 static int fec_num_lanes(struct mlx5_core_dev *dev)
1283 {
1284 	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1285 	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1286 	int err;
1287 
1288 	MLX5_SET(pmlp_reg, in, local_port, 1);
1289 	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
1290 				   MLX5_REG_PMLP, 0, 0);
1291 	if (err)
1292 		return 0;
1293 
1294 	return MLX5_GET(pmlp_reg, out, width);
1295 }
1296 
1297 static int fec_active_mode(struct mlx5_core_dev *mdev)
1298 {
1299 	unsigned long fec_active_long;
1300 	u32 fec_active;
1301 
1302 	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
1303 		return MLX5E_FEC_NOFEC;
1304 
1305 	fec_active_long = fec_active;
1306 	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
1307 }
1308 
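/* Token-pasting helper: expands idx into the per-lane Firecode FEC
 * counter field names (fc_fec_*_blocks_lane<idx>) so that each lane's
 * corrected/uncorrectable block counts land in ethtool_fec_stats.
 */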
1309 #define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
1310 	fec_stats->corrected_blocks.lanes[(idx)] = \
1311 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1312 				      fc_fec_corrected_blocks_lane##idx); \
1313 	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
1314 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1315 				      fc_fec_uncorrectable_blocks_lane##idx); \
1316 })
1317 
1318 static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
1319 			     u32 *ppcnt, u8 lanes)
1320 {
1321 	if (lanes > 3) { /* 4 lanes */
1322 		MLX5E_STATS_SET_FEC_BLOCK(3);
1323 		MLX5E_STATS_SET_FEC_BLOCK(2);
1324 	}
1325 	if (lanes > 1) /* 2 lanes */
1326 		MLX5E_STATS_SET_FEC_BLOCK(1);
1327 	if (lanes > 0) /* 1 lane */
1328 		MLX5E_STATS_SET_FEC_BLOCK(0);
1329 }
1330 
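/* Reed-Solomon (and LLRS) FEC reports only port-wide block totals, so
 * just the totals are filled and the per-lane entries are left untouched.
 */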
1331 static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
1332 {
1333 	fec_stats->corrected_blocks.total =
1334 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1335 				      rs_fec_corrected_blocks);
1336 	fec_stats->uncorrectable_blocks.total =
1337 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1338 				      rs_fec_uncorrectable_blocks);
1339 }
1340 
1341 static void fec_set_block_stats(struct mlx5e_priv *priv,
1342 				struct ethtool_fec_stats *fec_stats)
1343 {
1344 	struct mlx5_core_dev *mdev = priv->mdev;
1345 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1346 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1347 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1348 	int mode = fec_active_mode(mdev);
1349 
1350 	if (mode == MLX5E_FEC_NOFEC)
1351 		return;
1352 
1353 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1354 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1355 	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
1356 		return;
1357 
1358 	switch (mode) {
1359 	case MLX5E_FEC_RS_528_514:
1360 	case MLX5E_FEC_RS_544_514:
1361 	case MLX5E_FEC_LLRS_272_257_1:
1362 		fec_set_rs_stats(fec_stats, out);
1363 		return;
1364 	case MLX5E_FEC_FIRECODE:
1365 		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
1366 	}
1367 }
1368 
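/* phy_corrected_bits comes from the statistical PPCNT group and is
 * filled without checking the active FEC mode; the caller has already
 * verified the PCAM capability for this group.
 */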
1369 static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1370 					 struct ethtool_fec_stats *fec_stats)
1371 {
1372 	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1373 	struct mlx5_core_dev *mdev = priv->mdev;
1374 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1375 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1376 
1377 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1378 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1379 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1380 				 sz, MLX5_REG_PPCNT, 0, 0))
1381 		return;
1382 
1383 	fec_stats->corrected_bits.total =
1384 		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1385 				      phys_layer_statistical_cntrs,
1386 				      phy_corrected_bits);
1387 }
1388 
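/* Entry point for ethtool FEC statistics (ethtool_ops::get_fec_stats);
 * typically reached from userspace via ethtool's --show-fec together
 * with --include-statistics (assumed ethtool invocation).
 */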
1389 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1390 			 struct ethtool_fec_stats *fec_stats)
1391 {
1392 	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
1393 		return;
1394 
1395 	fec_set_corrected_bits_total(priv, fec_stats);
1396 	fec_set_block_stats(priv, fec_stats);
1397 }
1398 
1399 #define PPORT_ETH_EXT_OFF(c) \
1400 	MLX5_BYTE_OFF(ppcnt_reg, \
1401 		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1402 static const struct counter_desc pport_eth_ext_stats_desc[] = {
1403 	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1404 };
1405 
1406 #define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)
1407 
1408 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1409 {
1410 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1411 		return NUM_PPORT_ETH_EXT_COUNTERS;
1412 
1413 	return 0;
1414 }
1415 
1416 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1417 {
1418 	int i;
1419 
1420 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1421 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1422 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1423 			       pport_eth_ext_stats_desc[i].format);
1424 	return idx;
1425 }
1426 
1427 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1428 {
1429 	int i;
1430 
1431 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1432 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1433 			data[idx++] =
1434 				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1435 						    pport_eth_ext_stats_desc, i);
1436 	return idx;
1437 }
1438 
1439 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1440 {
1441 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1442 	struct mlx5_core_dev *mdev = priv->mdev;
1443 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1444 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1445 	void *out;
1446 
1447 	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1448 		return;
1449 
1450 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1451 	out = pstats->eth_ext_counters;
1452 	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1453 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1454 }
1455 
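/* PCIE_PERF_OFF addresses 32-bit MPCNT fields read with
 * MLX5E_READ_CTR32_BE; PCIE_PERF_OFF64 points at the _high half of a
 * 64-bit field so MLX5E_READ_CTR64_BE can read the full counter.
 */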
1456 #define PCIE_PERF_OFF(c) \
1457 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1458 static const struct counter_desc pcie_perf_stats_desc[] = {
1459 	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1460 	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
1461 };
1462 
1463 #define PCIE_PERF_OFF64(c) \
1464 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1465 static const struct counter_desc pcie_perf_stats_desc64[] = {
1466 	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1467 };
1468 
1469 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1470 	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1471 	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1472 	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1473 	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1474 };
1475 
1476 #define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
1477 #define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
1478 #define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
1479 
1480 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1481 {
1482 	int num_stats = 0;
1483 
1484 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1485 		num_stats += NUM_PCIE_PERF_COUNTERS;
1486 
1487 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1488 		num_stats += NUM_PCIE_PERF_COUNTERS64;
1489 
1490 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1491 		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1492 
1493 	return num_stats;
1494 }
1495 
1496 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1497 {
1498 	int i;
1499 
1500 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1501 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1502 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1503 			       pcie_perf_stats_desc[i].format);
1504 
1505 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1506 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1507 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1508 			       pcie_perf_stats_desc64[i].format);
1509 
1510 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1511 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1512 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1513 			       pcie_perf_stall_stats_desc[i].format);
1514 	return idx;
1515 }
1516 
1517 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1518 {
1519 	int i;
1520 
1521 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1522 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1523 			data[idx++] =
1524 				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1525 						    pcie_perf_stats_desc, i);
1526 
1527 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1528 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1529 			data[idx++] =
1530 				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
1531 						    pcie_perf_stats_desc64, i);
1532 
1533 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1534 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1535 			data[idx++] =
1536 				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1537 						    pcie_perf_stall_stats_desc, i);
1538 	return idx;
1539 }
1540 
1541 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
1542 {
1543 	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1544 	struct mlx5_core_dev *mdev = priv->mdev;
1545 	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1546 	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1547 	void *out;
1548 
1549 	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1550 		return;
1551 
1552 	out = pcie_stats->pcie_perf_counters;
1553 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1554 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
1555 }
1556 
1557 #define PPORT_PER_TC_PRIO_OFF(c) \
1558 	MLX5_BYTE_OFF(ppcnt_reg, \
1559 		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1560 
1561 static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1562 	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1563 };
1564 
1565 #define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1566 
1567 #define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1568 	MLX5_BYTE_OFF(ppcnt_reg, \
1569 		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1570 
1571 static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1572 	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1573 	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1574 };
1575 
1576 #define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1577 	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1578 
1579 static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1580 {
1581 	struct mlx5_core_dev *mdev = priv->mdev;
1582 
1583 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1584 		return 0;
1585 
1586 	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1587 }
1588 
1589 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1590 {
1591 	struct mlx5_core_dev *mdev = priv->mdev;
1592 	int i, prio;
1593 
1594 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1595 		return idx;
1596 
1597 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1598 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1599 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1600 				pport_per_tc_prio_stats_desc[i].format, prio);
1601 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1602 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1603 				pport_per_tc_congest_prio_stats_desc[i].format, prio);
1604 	}
1605 
1606 	return idx;
1607 }
1608 
1609 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1610 {
1611 	struct mlx5e_pport_stats *pport = &priv->stats.pport;
1612 	struct mlx5_core_dev *mdev = priv->mdev;
1613 	int i, prio;
1614 
1615 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1616 		return idx;
1617 
1618 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1619 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1620 			data[idx++] =
1621 				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1622 						    pport_per_tc_prio_stats_desc, i);
1623 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1624 			data[idx++] =
1625 				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1626 						    pport_per_tc_congest_prio_stats_desc, i);
1627 	}
1628 
1629 	return idx;
1630 }
1631 
1632 static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1633 {
1634 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1635 	struct mlx5_core_dev *mdev = priv->mdev;
1636 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1637 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1638 	void *out;
1639 	int prio;
1640 
1641 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1642 		return;
1643 
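	/* One PPCNT query per traffic class: prio_tc selects the TC and
	 * the result lands in that TC's counter page.
	 */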
1644 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1645 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1646 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1647 		out = pstats->per_tc_prio_counters[prio];
1648 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1649 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1650 	}
1651 }
1652 
1653 static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1654 {
1655 	struct mlx5_core_dev *mdev = priv->mdev;
1656 
1657 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1658 		return 0;
1659 
1660 	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1661 }
1662 
1663 static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1664 {
1665 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1666 	struct mlx5_core_dev *mdev = priv->mdev;
1667 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1668 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1669 	void *out;
1670 	int prio;
1671 
1672 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1673 		return;
1674 
1675 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1676 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1677 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1678 		out = pstats->per_tc_congest_prio_counters[prio];
1679 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1680 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1681 	}
1682 }
1683 
1684 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1685 {
1686 	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1687 		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1688 }
1689 
1690 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1691 {
1692 	mlx5e_grp_per_tc_prio_update_stats(priv);
1693 	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1694 }
1695 
1696 #define PPORT_PER_PRIO_OFF(c) \
1697 	MLX5_BYTE_OFF(ppcnt_reg, \
1698 		      counter_set.eth_per_prio_grp_data_layout.c##_high)
1699 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1700 	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1701 	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1702 	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
1703 	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1704 	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1705 };
1706 
1707 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1708 
1709 static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1710 {
1711 	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1712 }
1713 
1714 static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1715 						   u8 *data,
1716 						   int idx)
1717 {
1718 	int i, prio;
1719 
1720 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1721 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1722 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1723 				pport_per_prio_traffic_stats_desc[i].format, prio);
1724 	}
1725 
1726 	return idx;
1727 }
1728 
1729 static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1730 						 u64 *data,
1731 						 int idx)
1732 {
1733 	int i, prio;
1734 
1735 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1736 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1737 			data[idx++] =
1738 				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1739 						    pport_per_prio_traffic_stats_desc, i);
1740 	}
1741 
1742 	return idx;
1743 }
1744 
1745 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1746 	/* %s is "global" or "prio{i}" */
1747 	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1748 	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1749 	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1750 	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1751 	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1752 };
1753 
1754 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1755 	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1756 	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1757 };
1758 
1759 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
1760 #define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1761 						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1762 						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
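/* NUM_PPORT_PFC_STALL_COUNTERS multiplies by single-bit capability
 * fields, so the stall counters are exposed only when both pfcc_mask
 * and stall_detect are supported.
 */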
1763 
1764 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1765 {
1766 	struct mlx5_core_dev *mdev = priv->mdev;
1767 	u8 pfc_en_tx;
1768 	u8 pfc_en_rx;
1769 	int err;
1770 
1771 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1772 		return 0;
1773 
1774 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1775 
1776 	return err ? 0 : pfc_en_tx | pfc_en_rx;
1777 }
1778 
1779 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1780 {
1781 	struct mlx5_core_dev *mdev = priv->mdev;
1782 	u32 rx_pause;
1783 	u32 tx_pause;
1784 	int err;
1785 
1786 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1787 		return false;
1788 
1789 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1790 
1791 	return err ? false : rx_pause | tx_pause;
1792 }
1793 
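/* Every PFC-enabled priority (one bit each in the combined rx/tx mask,
 * counted with hweight8) contributes one set of per-prio PFC counters;
 * global pause, when enabled, contributes one more set under the
 * "global" prefix.
 */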
1794 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1795 {
1796 	return (mlx5e_query_global_pause_combined(priv) +
1797 		hweight8(mlx5e_query_pfc_combined(priv))) *
1798 		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1799 		NUM_PPORT_PFC_STALL_COUNTERS(priv);
1800 }
1801 
1802 static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1803 					       u8 *data,
1804 					       int idx)
1805 {
1806 	unsigned long pfc_combined;
1807 	int i, prio;
1808 
1809 	pfc_combined = mlx5e_query_pfc_combined(priv);
1810 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1811 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1812 			char pfc_string[ETH_GSTRING_LEN];
1813 
1814 			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1815 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1816 				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
1817 		}
1818 	}
1819 
1820 	if (mlx5e_query_global_pause_combined(priv)) {
1821 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1822 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1823 				pport_per_prio_pfc_stats_desc[i].format, "global");
1824 		}
1825 	}
1826 
1827 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1828 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
1829 		       pport_pfc_stall_stats_desc[i].format);
1830 
1831 	return idx;
1832 }
1833 
1834 static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1835 					     u64 *data,
1836 					     int idx)
1837 {
1838 	unsigned long pfc_combined;
1839 	int i, prio;
1840 
1841 	pfc_combined = mlx5e_query_pfc_combined(priv);
1842 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1843 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1844 			data[idx++] =
1845 				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1846 						    pport_per_prio_pfc_stats_desc, i);
1847 		}
1848 	}
1849 
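	/* Global pause counters are read from the prio 0 counter page. */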
1850 	if (mlx5e_query_global_pause_combined(priv)) {
1851 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1852 			data[idx++] =
1853 				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1854 						    pport_per_prio_pfc_stats_desc, i);
1855 		}
1856 	}
1857 
1858 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1859 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1860 						  pport_pfc_stall_stats_desc, i);
1861 
1862 	return idx;
1863 }
1864 
1865 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1866 {
1867 	return mlx5e_grp_per_prio_traffic_get_num_stats() +
1868 		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1869 }
1870 
1871 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
1872 {
1873 	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1874 	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
1875 	return idx;
1876 }
1877 
1878 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
1879 {
1880 	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1881 	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
1882 	return idx;
1883 }
1884 
1885 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
1886 {
1887 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1888 	struct mlx5_core_dev *mdev = priv->mdev;
1889 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1890 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1891 	int prio;
1892 	void *out;
1893 
1894 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1895 		return;
1896 
1897 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1898 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1899 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1900 		out = pstats->per_prio_counters[prio];
1901 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1902 		mlx5_core_access_reg(mdev, in, sz, out, sz,
1903 				     MLX5_REG_PPCNT, 0, 0);
1904 	}
1905 }
1906 
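/* Port module event (PME) counters are software counters kept by
 * mlx5_core; each descriptor's offset indexes a u64 slot in the
 * status/error arrays returned by mlx5_get_pme_stats().
 */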
1907 static const struct counter_desc mlx5e_pme_status_desc[] = {
1908 	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
1909 };
1910 
1911 static const struct counter_desc mlx5e_pme_error_desc[] = {
1912 	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1913 	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1914 	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
1915 };
1916 
1917 #define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
1918 #define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
1919 
1920 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
1921 {
1922 	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
1923 }
1924 
1925 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
1926 {
1927 	int i;
1928 
1929 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1930 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1931 
1932 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
1933 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1934 
1935 	return idx;
1936 }
1937 
1938 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
1939 {
1940 	struct mlx5_pme_stats pme_stats;
1941 	int i;
1942 
1943 	mlx5_get_pme_stats(priv->mdev, &pme_stats);
1944 
1945 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1946 		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1947 						   mlx5e_pme_status_desc, i);
1948 
1949 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
1950 		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1951 						   mlx5e_pme_error_desc, i);
1952 
1953 	return idx;
1954 }
1955 
1956 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1957 
1958 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
1959 {
1960 	return mlx5e_ktls_get_count(priv);
1961 }
1962 
1963 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
1964 {
1965 	return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
1966 }
1967 
1968 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
1969 {
1970 	return idx + mlx5e_ktls_get_stats(priv, data + idx);
1971 }
1972 
1973 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1974 
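/* Per-channel software counters. The MLX5E_DECLARE_*_STAT() helpers
 * pair an ethtool format string (e.g. "rx%d_packets") with the field's
 * offset inside the stats struct, which the fill helpers below consume.
 */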
1975 static const struct counter_desc rq_stats_desc[] = {
1976 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1977 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
1978 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
1979 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1980 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1981 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1982 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1983 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
1984 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
1985 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1986 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
1987 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
1988 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
1989 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
1990 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
1991 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
1992 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
1993 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
1994 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1995 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
1996 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1997 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1998 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1999 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2000 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2001 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2002 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
2003 #ifdef CONFIG_MLX5_EN_ARFS
2004 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_add) },
2005 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_in) },
2006 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_out) },
2007 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_expired) },
2008 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
2009 #endif
2010 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
2011 #ifdef CONFIG_PAGE_POOL_STATS
2012 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
2013 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
2014 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
2015 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
2016 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
2017 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
2018 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
2019 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
2020 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
2021 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
2022 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
2023 #endif
2024 #ifdef CONFIG_MLX5_EN_TLS
2025 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
2026 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
2027 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
2028 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
2029 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
2030 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
2031 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
2032 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
2033 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
2034 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
2035 #endif
2036 };
2037 
2038 static const struct counter_desc sq_stats_desc[] = {
2039 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
2040 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
2041 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2042 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2043 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2044 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2045 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2046 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2047 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2048 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
2049 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2050 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2051 #ifdef CONFIG_MLX5_EN_TLS
2052 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2053 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2054 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2055 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2056 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2057 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2058 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2059 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2060 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2061 #endif
2062 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2063 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
2064 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
2065 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2066 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
2067 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
2068 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
2069 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2070 };
2071 
2072 static const struct counter_desc rq_xdpsq_stats_desc[] = {
2073 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2074 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2075 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2076 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2077 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2078 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2079 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2080 };
2081 
2082 static const struct counter_desc xdpsq_stats_desc[] = {
2083 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2084 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2085 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2086 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2087 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2088 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2089 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2090 };
2091 
2092 static const struct counter_desc xskrq_stats_desc[] = {
2093 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
2094 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
2095 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2096 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2097 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2098 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
2099 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2100 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2101 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2102 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2103 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2104 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2105 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2106 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2107 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2108 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2109 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2110 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2111 };
2112 
2113 static const struct counter_desc xsksq_stats_desc[] = {
2114 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2115 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2116 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2117 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2118 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2119 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2120 };
2121 
2122 static const struct counter_desc ch_stats_desc[] = {
2123 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
2124 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
2125 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
2126 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
2127 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
2128 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2129 };
2130 
2131 static const struct counter_desc ptp_sq_stats_desc[] = {
2132 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
2133 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
2134 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2135 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2136 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2137 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
2138 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2139 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
2140 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
2141 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2142 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
2143 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2144 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
2145 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2146 };
2147 
2148 static const struct counter_desc ptp_ch_stats_desc[] = {
2149 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
2150 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
2151 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
2152 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2153 };
2154 
2155 static const struct counter_desc ptp_cq_stats_desc[] = {
2156 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
2157 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
2158 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
2159 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
2160 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
2161 };
2162 
2163 static const struct counter_desc ptp_rq_stats_desc[] = {
2164 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
2165 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
2166 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2167 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2168 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2169 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2170 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2171 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
2172 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2173 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2174 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
2175 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
2176 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2177 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2178 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2179 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2180 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2181 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2182 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2183 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2184 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2185 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2186 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
2187 };
2188 
2189 static const struct counter_desc qos_sq_stats_desc[] = {
2190 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
2191 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
2192 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2193 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2194 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2195 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2196 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2197 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2198 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2199 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
2200 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2201 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2202 #ifdef CONFIG_MLX5_EN_TLS
2203 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2204 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2205 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2206 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2207 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2208 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2209 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2210 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2211 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2212 #endif
2213 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2214 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
2215 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
2216 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2217 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
2218 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
2219 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
2220 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2221 };
2222 
2223 #define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
2224 #define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
2225 #define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
2226 #define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
2227 #define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
2228 #define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
2229 #define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
2230 #define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
2231 #define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
2232 #define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
2233 #define NUM_PTP_RQ_STATS                ARRAY_SIZE(ptp_rq_stats_desc)
2234 #define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)
2235 
2236 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
2237 {
2238 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2239 	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
2240 }
2241 
2242 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2243 {
2244 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2245 	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2246 	int i, qid;
2247 
2248 	for (qid = 0; qid < max_qos_sqs; qid++)
2249 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2250 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2251 				qos_sq_stats_desc[i].format, qid);
2252 
2253 	return idx;
2254 }
2255 
2256 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
2257 {
2258 	struct mlx5e_sq_stats **stats;
2259 	u16 max_qos_sqs;
2260 	int i, qid;
2261 
2262 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2263 	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2264 	stats = READ_ONCE(priv->htb_qos_sq_stats);
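	/* Entries up to max_qos_sqs were published before the count was
	 * bumped, so each stats[qid] read below is a valid pointer.
	 */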
2265 
2266 	for (qid = 0; qid < max_qos_sqs; qid++) {
2267 		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2268 
2269 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2270 			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
2271 	}
2272 
2273 	return idx;
2274 }
2275 
2276 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
2277 
2278 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2279 {
2280 	int num = NUM_PTP_CH_STATS;
2281 
2282 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2283 		return 0;
2284 
2285 	if (priv->tx_ptp_opened)
2286 		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2287 	if (priv->rx_ptp_opened)
2288 		num += NUM_PTP_RQ_STATS;
2289 
2290 	return num;
2291 }
2292 
2293 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2294 {
2295 	int i, tc;
2296 
2297 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2298 		return idx;
2299 
2300 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2301 		sprintf(data + (idx++) * ETH_GSTRING_LEN,
2302 			"%s", ptp_ch_stats_desc[i].format);
2303 
2304 	if (priv->tx_ptp_opened) {
2305 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2306 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2307 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
2308 					ptp_sq_stats_desc[i].format, tc);
2309 
2310 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2311 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2312 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
2313 					ptp_cq_stats_desc[i].format, tc);
2314 	}
2315 	if (priv->rx_ptp_opened) {
2316 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2317 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2318 				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
2319 	}
2320 	return idx;
2321 }
2322 
2323 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
2324 {
2325 	int i, tc;
2326 
2327 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2328 		return idx;
2329 
2330 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2331 		data[idx++] =
2332 			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
2333 					     ptp_ch_stats_desc, i);
2334 
2335 	if (priv->tx_ptp_opened) {
2336 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2337 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2338 				data[idx++] =
2339 					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
2340 							     ptp_sq_stats_desc, i);
2341 
2342 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2343 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2344 				data[idx++] =
2345 					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
2346 							     ptp_cq_stats_desc, i);
2347 	}
2348 	if (priv->rx_ptp_opened) {
2349 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2350 			data[idx++] =
2351 				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2352 						     ptp_rq_stats_desc, i);
2353 	}
2354 	return idx;
2355 }
2356 
2357 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
2358 
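/* XSK rows are gated by priv->xsk.ever_used rather than the current
 * socket state: once an XSK socket has been opened the rows remain,
 * keeping the ethtool string/value layout stable.
 */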
2359 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
2360 {
2361 	int max_nch = priv->stats_nch;
2362 
2363 	return (NUM_RQ_STATS * max_nch) +
2364 	       (NUM_CH_STATS * max_nch) +
2365 	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2366 	       (NUM_RQ_XDPSQ_STATS * max_nch) +
2367 	       (NUM_XDPSQ_STATS * max_nch) +
2368 	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2369 	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
2370 }
2371 
2372 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
2373 {
2374 	bool is_xsk = priv->xsk.ever_used;
2375 	int max_nch = priv->stats_nch;
2376 	int i, j, tc;
2377 
2378 	for (i = 0; i < max_nch; i++)
2379 		for (j = 0; j < NUM_CH_STATS; j++)
2380 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2381 				ch_stats_desc[j].format, i);
2382 
2383 	for (i = 0; i < max_nch; i++) {
2384 		for (j = 0; j < NUM_RQ_STATS; j++)
2385 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2386 				rq_stats_desc[j].format, i);
2387 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2388 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2389 				xskrq_stats_desc[j].format, i);
2390 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2391 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2392 				rq_xdpsq_stats_desc[j].format, i);
2393 	}
2394 
2395 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2396 		for (i = 0; i < max_nch; i++)
2397 			for (j = 0; j < NUM_SQ_STATS; j++)
2398 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
2399 					sq_stats_desc[j].format,
2400 					i + tc * max_nch);
2401 
2402 	for (i = 0; i < max_nch; i++) {
2403 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2404 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2405 				xsksq_stats_desc[j].format, i);
2406 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2407 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2408 				xdpsq_stats_desc[j].format, i);
2409 	}
2410 
2411 	return idx;
2412 }
2413 
2414 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
2415 {
2416 	bool is_xsk = priv->xsk.ever_used;
2417 	int max_nch = priv->stats_nch;
2418 	int i, j, tc;
2419 
2420 	for (i = 0; i < max_nch; i++)
2421 		for (j = 0; j < NUM_CH_STATS; j++)
2422 			data[idx++] =
2423 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
2424 						     ch_stats_desc, j);
2425 
2426 	for (i = 0; i < max_nch; i++) {
2427 		for (j = 0; j < NUM_RQ_STATS; j++)
2428 			data[idx++] =
2429 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
2430 						     rq_stats_desc, j);
2431 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2432 			data[idx++] =
2433 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
2434 						     xskrq_stats_desc, j);
2435 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2436 			data[idx++] =
2437 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
2438 						     rq_xdpsq_stats_desc, j);
2439 	}
2440 
2441 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2442 		for (i = 0; i < max_nch; i++)
2443 			for (j = 0; j < NUM_SQ_STATS; j++)
2444 				data[idx++] =
2445 					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
2446 							     sq_stats_desc, j);
2447 
2448 	for (i = 0; i < max_nch; i++) {
2449 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2450 			data[idx++] =
2451 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
2452 						     xsksq_stats_desc, j);
2453 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2454 			data[idx++] =
2455 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
2456 						     xdpsq_stats_desc, j);
2457 	}
2458 
2459 	return idx;
2460 }
2461 
2462 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2463 
2464 MLX5E_DEFINE_STATS_GRP(sw, 0);
2465 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2466 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2467 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2468 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2469 MLX5E_DEFINE_STATS_GRP(2863, 0);
2470 MLX5E_DEFINE_STATS_GRP(2819, 0);
2471 MLX5E_DEFINE_STATS_GRP(phy, 0);
2472 MLX5E_DEFINE_STATS_GRP(pcie, 0);
2473 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2474 MLX5E_DEFINE_STATS_GRP(pme, 0);
2475 MLX5E_DEFINE_STATS_GRP(channels, 0);
2476 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2477 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2478 static MLX5E_DEFINE_STATS_GRP(tls, 0);
2479 MLX5E_DEFINE_STATS_GRP(ptp, 0);
2480 static MLX5E_DEFINE_STATS_GRP(qos, 0);
2481 
2482 /* The stats groups are listed in the reverse order of the update_stats() calls */
2483 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2484 	&MLX5E_STATS_GRP(sw),
2485 	&MLX5E_STATS_GRP(qcnt),
2486 	&MLX5E_STATS_GRP(vnic_env),
2487 	&MLX5E_STATS_GRP(vport),
2488 	&MLX5E_STATS_GRP(802_3),
2489 	&MLX5E_STATS_GRP(2863),
2490 	&MLX5E_STATS_GRP(2819),
2491 	&MLX5E_STATS_GRP(phy),
2492 	&MLX5E_STATS_GRP(eth_ext),
2493 	&MLX5E_STATS_GRP(pcie),
2494 	&MLX5E_STATS_GRP(per_prio),
2495 	&MLX5E_STATS_GRP(pme),
2496 #ifdef CONFIG_MLX5_EN_IPSEC
2497 	&MLX5E_STATS_GRP(ipsec_hw),
2498 	&MLX5E_STATS_GRP(ipsec_sw),
2499 #endif
2500 	&MLX5E_STATS_GRP(tls),
2501 	&MLX5E_STATS_GRP(channels),
2502 	&MLX5E_STATS_GRP(per_port_buff_congest),
2503 	&MLX5E_STATS_GRP(ptp),
2504 	&MLX5E_STATS_GRP(qos),
2505 #ifdef CONFIG_MLX5_MACSEC
2506 	&MLX5E_STATS_GRP(macsec_hw),
2507 #endif
2508 };
2509 
2510 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2511 {
2512 	return ARRAY_SIZE(mlx5e_nic_stats_grps);
2513 }
2514