1 /*
2  * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include "lib/mlx5.h"
34 #include "en.h"
35 #include "en_accel/ktls.h"
36 #include "en_accel/en_accel.h"
37 #include "en/ptp.h"
38 #include "en/port.h"
39 
40 #ifdef CONFIG_PAGE_POOL_STATS
41 #include <net/page_pool.h>
42 #endif
43 
44 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
45 {
46 	return !priv->profile->stats_grps_num ? 0 :
47 		priv->profile->stats_grps_num(priv);
48 }
49 
50 unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
51 {
52 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
53 	const unsigned int num_stats_grps = stats_grps_num(priv);
54 	unsigned int total = 0;
55 	int i;
56 
57 	for (i = 0; i < num_stats_grps; i++)
58 		total += stats_grps[i]->get_num_stats(priv);
59 
60 	return total;
61 }
62 
63 void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
64 {
65 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
66 	const unsigned int num_stats_grps = stats_grps_num(priv);
67 	int i;
68 
69 	for (i = num_stats_grps - 1; i >= 0; i--)
70 		if (stats_grps[i]->update_stats &&
71 		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
72 			stats_grps[i]->update_stats(priv);
73 }
74 
75 void mlx5e_stats_update(struct mlx5e_priv *priv)
76 {
77 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
78 	const unsigned int num_stats_grps = stats_grps_num(priv);
79 	int i;
80 
81 	for (i = num_stats_grps - 1; i >= 0; i--)
82 		if (stats_grps[i]->update_stats)
83 			stats_grps[i]->update_stats(priv);
84 }
85 
86 void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
87 {
88 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
89 	const unsigned int num_stats_grps = stats_grps_num(priv);
90 	int i;
91 
92 	for (i = 0; i < num_stats_grps; i++)
93 		idx = stats_grps[i]->fill_stats(priv, data, idx);
94 }
95 
96 void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
97 {
98 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
99 	const unsigned int num_stats_grps = stats_grps_num(priv);
100 	int i, idx = 0;
101 
102 	for (i = 0; i < num_stats_grps; i++)
103 		idx = stats_grps[i]->fill_strings(priv, data, idx);
104 }
105 
106 /* Concrete NIC Stats */
107 
108 static const struct counter_desc sw_stats_desc[] = {
109 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
110 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
111 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
112 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
113 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
114 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
115 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
116 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
117 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
118 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
119 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
120 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },
121 
122 #ifdef CONFIG_MLX5_EN_TLS
123 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
124 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
125 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
126 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
127 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
128 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
129 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
130 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
131 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
132 #endif
133 
134 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
135 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
136 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
137 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
138 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
139 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
140 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
141 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
142 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
143 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
144 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
145 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
146 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
147 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
148 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
149 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
150 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
151 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
152 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
153 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
154 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
155 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
156 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
157 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
158 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
159 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
160 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
161 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
162 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
163 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
164 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
165 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
166 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
167 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
168 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
169 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
170 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
171 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
172 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
173 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
174 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
175 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
176 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
177 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
178 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
179 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
180 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
181 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
182 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
183 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
184 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
185 #ifdef CONFIG_PAGE_POOL_STATS
186 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
187 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
188 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
189 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
190 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
191 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
192 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
193 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
194 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
195 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
196 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
197 #endif
198 #ifdef CONFIG_MLX5_EN_TLS
199 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
200 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
201 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
202 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
203 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
204 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
205 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
206 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
207 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
208 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
209 #endif
210 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
211 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
212 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
213 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
214 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
215 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
216 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
217 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
218 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
219 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
220 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
221 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
222 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
223 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
224 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
225 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
226 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
227 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
228 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
229 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
230 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
231 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
232 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
233 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
234 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
235 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
236 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
237 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
238 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
239 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
240 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
241 };
242 
243 #define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
244 
245 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
246 {
247 	return NUM_SW_COUNTERS;
248 }
249 
250 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
251 {
252 	int i;
253 
254 	for (i = 0; i < NUM_SW_COUNTERS; i++)
255 		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
256 	return idx;
257 }
258 
259 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
260 {
261 	int i;
262 
263 	for (i = 0; i < NUM_SW_COUNTERS; i++)
264 		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
265 	return idx;
266 }
267 
268 static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
269 						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
270 {
271 	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
272 	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
273 	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
274 	s->tx_xdp_nops  += xdpsq_red_stats->nops;
275 	s->tx_xdp_full  += xdpsq_red_stats->full;
276 	s->tx_xdp_err   += xdpsq_red_stats->err;
277 	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
278 }
279 
280 static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
281 						  struct mlx5e_xdpsq_stats *xdpsq_stats)
282 {
283 	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
284 	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
285 	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
286 	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
287 	s->rx_xdp_tx_full  += xdpsq_stats->full;
288 	s->rx_xdp_tx_err   += xdpsq_stats->err;
289 	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
290 }
291 
292 static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
293 						  struct mlx5e_xdpsq_stats *xsksq_stats)
294 {
295 	s->tx_xsk_xmit  += xsksq_stats->xmit;
296 	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
297 	s->tx_xsk_inlnw += xsksq_stats->inlnw;
298 	s->tx_xsk_full  += xsksq_stats->full;
299 	s->tx_xsk_err   += xsksq_stats->err;
300 	s->tx_xsk_cqes  += xsksq_stats->cqes;
301 }
302 
303 static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
304 						  struct mlx5e_rq_stats *xskrq_stats)
305 {
306 	s->rx_xsk_packets                += xskrq_stats->packets;
307 	s->rx_xsk_bytes                  += xskrq_stats->bytes;
308 	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
309 	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
310 	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
311 	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
312 	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
313 	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
314 	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
315 	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
316 	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
317 	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
318 	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
319 	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
320 	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
321 	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
322 	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
323 	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
324 	s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
325 }
326 
327 static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
328 						     struct mlx5e_rq_stats *rq_stats)
329 {
330 	s->rx_packets                 += rq_stats->packets;
331 	s->rx_bytes                   += rq_stats->bytes;
332 	s->rx_lro_packets             += rq_stats->lro_packets;
333 	s->rx_lro_bytes               += rq_stats->lro_bytes;
334 	s->rx_gro_packets             += rq_stats->gro_packets;
335 	s->rx_gro_bytes               += rq_stats->gro_bytes;
336 	s->rx_gro_skbs                += rq_stats->gro_skbs;
337 	s->rx_gro_match_packets       += rq_stats->gro_match_packets;
338 	s->rx_gro_large_hds           += rq_stats->gro_large_hds;
339 	s->rx_ecn_mark                += rq_stats->ecn_mark;
340 	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
341 	s->rx_csum_none               += rq_stats->csum_none;
342 	s->rx_csum_complete           += rq_stats->csum_complete;
343 	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
344 	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
345 	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
346 	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
347 	s->rx_xdp_drop                += rq_stats->xdp_drop;
348 	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
349 	s->rx_wqe_err                 += rq_stats->wqe_err;
350 	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
351 	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
352 	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
353 	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
354 	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
355 	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
356 	s->rx_congst_umr              += rq_stats->congst_umr;
357 	s->rx_arfs_err                += rq_stats->arfs_err;
358 	s->rx_recover                 += rq_stats->recover;
359 #ifdef CONFIG_PAGE_POOL_STATS
360 	s->rx_pp_alloc_fast          += rq_stats->pp_alloc_fast;
361 	s->rx_pp_alloc_slow          += rq_stats->pp_alloc_slow;
362 	s->rx_pp_alloc_empty         += rq_stats->pp_alloc_empty;
363 	s->rx_pp_alloc_refill        += rq_stats->pp_alloc_refill;
364 	s->rx_pp_alloc_waive         += rq_stats->pp_alloc_waive;
365 	s->rx_pp_alloc_slow_high_order		+= rq_stats->pp_alloc_slow_high_order;
366 	s->rx_pp_recycle_cached			+= rq_stats->pp_recycle_cached;
367 	s->rx_pp_recycle_cache_full		+= rq_stats->pp_recycle_cache_full;
368 	s->rx_pp_recycle_ring			+= rq_stats->pp_recycle_ring;
369 	s->rx_pp_recycle_ring_full		+= rq_stats->pp_recycle_ring_full;
370 	s->rx_pp_recycle_released_ref		+= rq_stats->pp_recycle_released_ref;
371 #endif
372 #ifdef CONFIG_MLX5_EN_TLS
373 	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
374 	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
375 	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
376 	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
377 	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
378 	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
379 	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
380 	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
381 	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
382 	s->rx_tls_err                 += rq_stats->tls_err;
383 #endif
384 }
385 
386 static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
387 						     struct mlx5e_ch_stats *ch_stats)
388 {
389 	s->ch_events      += ch_stats->events;
390 	s->ch_poll        += ch_stats->poll;
391 	s->ch_arm         += ch_stats->arm;
392 	s->ch_aff_change  += ch_stats->aff_change;
393 	s->ch_force_irq   += ch_stats->force_irq;
394 	s->ch_eq_rearm    += ch_stats->eq_rearm;
395 }
396 
397 static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
398 					       struct mlx5e_sq_stats *sq_stats)
399 {
400 	s->tx_packets               += sq_stats->packets;
401 	s->tx_bytes                 += sq_stats->bytes;
402 	s->tx_tso_packets           += sq_stats->tso_packets;
403 	s->tx_tso_bytes             += sq_stats->tso_bytes;
404 	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
405 	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
406 	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
407 	s->tx_nop                   += sq_stats->nop;
408 	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
409 	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
410 	s->tx_queue_stopped         += sq_stats->stopped;
411 	s->tx_queue_wake            += sq_stats->wake;
412 	s->tx_queue_dropped         += sq_stats->dropped;
413 	s->tx_cqe_err               += sq_stats->cqe_err;
414 	s->tx_recover               += sq_stats->recover;
415 	s->tx_xmit_more             += sq_stats->xmit_more;
416 	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
417 	s->tx_csum_none             += sq_stats->csum_none;
418 	s->tx_csum_partial          += sq_stats->csum_partial;
419 #ifdef CONFIG_MLX5_EN_TLS
420 	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
421 	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
422 	s->tx_tls_ooo               += sq_stats->tls_ooo;
423 	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
424 	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
425 	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
426 	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
427 	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
428 	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
429 #endif
430 	s->tx_cqes                  += sq_stats->cqes;
431 }
432 
433 static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
434 						struct mlx5e_sw_stats *s)
435 {
436 	int i;
437 
438 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
439 		return;
440 
441 	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);
442 
443 	if (priv->tx_ptp_opened) {
444 		for (i = 0; i < priv->max_opened_tc; i++) {
445 			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
446 
447 			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
448 			barrier();
449 		}
450 	}
451 	if (priv->rx_ptp_opened) {
452 		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
453 
454 		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
455 		barrier();
456 	}
457 }
458 
459 static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
460 						struct mlx5e_sw_stats *s)
461 {
462 	struct mlx5e_sq_stats **stats;
463 	u16 max_qos_sqs;
464 	int i;
465 
466 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
467 	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
468 	stats = READ_ONCE(priv->htb_qos_sq_stats);
469 
470 	for (i = 0; i < max_qos_sqs; i++) {
471 		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
472 
473 		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
474 		barrier();
475 	}
476 }
477 
478 #ifdef CONFIG_PAGE_POOL_STATS
479 static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
480 {
481 	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
482 	struct page_pool *pool = c->rq.page_pool;
483 	struct page_pool_stats stats = { 0 };
484 
485 	if (!page_pool_get_stats(pool, &stats))
486 		return;
487 
488 	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
489 	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
490 	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
491 	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
492 	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
493 	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
494 
495 	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
496 	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
497 	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
498 	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
499 	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
500 }
501 #else
502 static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
503 {
504 }
505 #endif
506 
507 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
508 {
509 	struct mlx5e_sw_stats *s = &priv->stats.sw;
510 	int i;
511 
512 	memset(s, 0, sizeof(*s));
513 
514 	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
515 		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
516 
517 	for (i = 0; i < priv->stats_nch; i++) {
518 		struct mlx5e_channel_stats *channel_stats =
519 			priv->channel_stats[i];
520 
521 		int j;
522 
523 		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
524 		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
525 		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
526 		/* xdp redirect */
527 		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
528 		/* AF_XDP zero-copy */
529 		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
530 		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);
531 
532 		for (j = 0; j < priv->max_opened_tc; j++) {
533 			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);
534 
535 			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
536 			barrier();
537 		}
538 	}
539 	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
540 	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
541 }
542 
543 static const struct counter_desc q_stats_desc[] = {
544 	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
545 };
546 
547 static const struct counter_desc drop_rq_stats_desc[] = {
548 	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
549 };
550 
551 #define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
552 #define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)
553 
554 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
555 {
556 	int num_stats = 0;
557 
558 	if (priv->q_counter)
559 		num_stats += NUM_Q_COUNTERS;
560 
561 	if (priv->drop_rq_q_counter)
562 		num_stats += NUM_DROP_RQ_COUNTERS;
563 
564 	return num_stats;
565 }
566 
567 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
568 {
569 	int i;
570 
571 	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
572 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
573 		       q_stats_desc[i].format);
574 
575 	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
576 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
577 		       drop_rq_stats_desc[i].format);
578 
579 	return idx;
580 }
581 
582 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
583 {
584 	int i;
585 
586 	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
587 		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
588 						   q_stats_desc, i);
589 	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
590 		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
591 						   drop_rq_stats_desc, i);
592 	return idx;
593 }
594 
595 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
596 {
597 	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
598 	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
599 	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
600 	int ret;
601 
602 	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
603 
604 	if (priv->q_counter) {
605 		MLX5_SET(query_q_counter_in, in, counter_set_id,
606 			 priv->q_counter);
607 		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
608 		if (!ret)
609 			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
610 							  out, out_of_buffer);
611 	}
612 
613 	if (priv->drop_rq_q_counter) {
614 		MLX5_SET(query_q_counter_in, in, counter_set_id,
615 			 priv->drop_rq_q_counter);
616 		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
617 		if (!ret)
618 			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
619 							    out, out_of_buffer);
620 	}
621 }
622 
623 #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
624 static const struct counter_desc vnic_env_stats_steer_desc[] = {
625 	{ "rx_steer_missed_packets",
626 		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
627 };
628 
629 static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
630 	{ "dev_internal_queue_oob",
631 		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
632 };
633 
634 static const struct counter_desc vnic_env_stats_drop_desc[] = {
635 	{ "rx_oversize_pkts_buffer",
636 		VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
637 };
638 
639 #define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
640 	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
641 	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
642 #define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
643 	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
644 	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
645 #define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
646 	(MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
647 	 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)
648 
649 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
650 {
651 	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
652 	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
653 	       NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
654 }
655 
656 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
657 {
658 	int i;
659 
660 	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
661 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
662 		       vnic_env_stats_steer_desc[i].format);
663 
664 	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
665 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
666 		       vnic_env_stats_dev_oob_desc[i].format);
667 
668 	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
669 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
670 		       vnic_env_stats_drop_desc[i].format);
671 
672 	return idx;
673 }
674 
675 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
676 {
677 	int i;
678 
679 	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
680 		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
681 						  vnic_env_stats_steer_desc, i);
682 
683 	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
684 		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
685 						  vnic_env_stats_dev_oob_desc, i);
686 
687 	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
688 		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
689 						  vnic_env_stats_drop_desc, i);
690 
691 	return idx;
692 }
693 
694 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
695 {
696 	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
697 	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
698 	struct mlx5_core_dev *mdev = priv->mdev;
699 
700 	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
701 		return;
702 
703 	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
704 	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
705 }
706 
707 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
708 static const struct counter_desc vport_stats_desc[] = {
709 	{ "rx_vport_unicast_packets",
710 		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
711 	{ "rx_vport_unicast_bytes",
712 		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
713 	{ "tx_vport_unicast_packets",
714 		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
715 	{ "tx_vport_unicast_bytes",
716 		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
717 	{ "rx_vport_multicast_packets",
718 		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
719 	{ "rx_vport_multicast_bytes",
720 		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
721 	{ "tx_vport_multicast_packets",
722 		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
723 	{ "tx_vport_multicast_bytes",
724 		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
725 	{ "rx_vport_broadcast_packets",
726 		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
727 	{ "rx_vport_broadcast_bytes",
728 		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
729 	{ "tx_vport_broadcast_packets",
730 		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
731 	{ "tx_vport_broadcast_bytes",
732 		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
733 	{ "rx_vport_rdma_unicast_packets",
734 		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
735 	{ "rx_vport_rdma_unicast_bytes",
736 		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
737 	{ "tx_vport_rdma_unicast_packets",
738 		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
739 	{ "tx_vport_rdma_unicast_bytes",
740 		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
741 	{ "rx_vport_rdma_multicast_packets",
742 		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
743 	{ "rx_vport_rdma_multicast_bytes",
744 		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
745 	{ "tx_vport_rdma_multicast_packets",
746 		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
747 	{ "tx_vport_rdma_multicast_bytes",
748 		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
749 };
750 
751 #define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
752 
753 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
754 {
755 	return NUM_VPORT_COUNTERS;
756 }
757 
758 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
759 {
760 	int i;
761 
762 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
763 		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
764 	return idx;
765 }
766 
767 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
768 {
769 	int i;
770 
771 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
772 		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
773 						  vport_stats_desc, i);
774 	return idx;
775 }
776 
777 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
778 {
779 	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
780 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
781 	struct mlx5_core_dev *mdev = priv->mdev;
782 
783 	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
784 	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
785 }
786 
787 #define PPORT_802_3_OFF(c) \
788 	MLX5_BYTE_OFF(ppcnt_reg, \
789 		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
790 static const struct counter_desc pport_802_3_stats_desc[] = {
791 	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
792 	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
793 	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
794 	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
795 	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
796 	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
797 	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
798 	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
799 	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
800 	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
801 	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
802 	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
803 	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
804 	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
805 	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
806 	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
807 	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
808 	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
809 };
810 
811 #define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
812 
813 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
814 {
815 	return NUM_PPORT_802_3_COUNTERS;
816 }
817 
818 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
819 {
820 	int i;
821 
822 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
823 		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
824 	return idx;
825 }
826 
827 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
828 {
829 	int i;
830 
831 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
832 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
833 						  pport_802_3_stats_desc, i);
834 	return idx;
835 }
836 
837 #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
838 	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
839 
840 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
841 {
842 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
843 	struct mlx5_core_dev *mdev = priv->mdev;
844 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
845 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
846 	void *out;
847 
848 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
849 		return;
850 
851 	MLX5_SET(ppcnt_reg, in, local_port, 1);
852 	out = pstats->IEEE_802_3_counters;
853 	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
854 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
855 }
856 
857 #define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
858 	be64_to_cpu(*(__be64 *)((char *)ptr +		\
859 		MLX5_BYTE_OFF(ppcnt_reg,		\
860 			      counter_set.set.c##_high)))
861 
862 static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
863 				u32 *ppcnt_ieee_802_3)
864 {
865 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
866 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
867 
868 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
869 		return -EOPNOTSUPP;
870 
871 	MLX5_SET(ppcnt_reg, in, local_port, 1);
872 	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
873 	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
874 				    sz, MLX5_REG_PPCNT, 0, 0);
875 }
876 
877 void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
878 			   struct ethtool_pause_stats *pause_stats)
879 {
880 	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
881 	struct mlx5_core_dev *mdev = priv->mdev;
882 
883 	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
884 		return;
885 
886 	pause_stats->tx_pause_frames =
887 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
888 				      eth_802_3_cntrs_grp_data_layout,
889 				      a_pause_mac_ctrl_frames_transmitted);
890 	pause_stats->rx_pause_frames =
891 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
892 				      eth_802_3_cntrs_grp_data_layout,
893 				      a_pause_mac_ctrl_frames_received);
894 }
895 
896 void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
897 			     struct ethtool_eth_phy_stats *phy_stats)
898 {
899 	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
900 	struct mlx5_core_dev *mdev = priv->mdev;
901 
902 	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
903 		return;
904 
905 	phy_stats->SymbolErrorDuringCarrier =
906 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
907 				      eth_802_3_cntrs_grp_data_layout,
908 				      a_symbol_error_during_carrier);
909 }
910 
911 void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
912 			     struct ethtool_eth_mac_stats *mac_stats)
913 {
914 	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
915 	struct mlx5_core_dev *mdev = priv->mdev;
916 
917 	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
918 		return;
919 
920 #define RD(name)							\
921 	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
922 			      eth_802_3_cntrs_grp_data_layout,		\
923 			      name)
924 
925 	mac_stats->FramesTransmittedOK	= RD(a_frames_transmitted_ok);
926 	mac_stats->FramesReceivedOK	= RD(a_frames_received_ok);
927 	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
928 	mac_stats->OctetsTransmittedOK	= RD(a_octets_transmitted_ok);
929 	mac_stats->OctetsReceivedOK	= RD(a_octets_received_ok);
930 	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
931 	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
932 	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
933 	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
934 	mac_stats->InRangeLengthErrors	= RD(a_in_range_length_errors);
935 	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
936 	mac_stats->FrameTooLongErrors	= RD(a_frame_too_long_errors);
937 #undef RD
938 }
939 
940 void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
941 			      struct ethtool_eth_ctrl_stats *ctrl_stats)
942 {
943 	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
944 	struct mlx5_core_dev *mdev = priv->mdev;
945 
946 	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
947 		return;
948 
949 	ctrl_stats->MACControlFramesTransmitted =
950 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
951 				      eth_802_3_cntrs_grp_data_layout,
952 				      a_mac_control_frames_transmitted);
953 	ctrl_stats->MACControlFramesReceived =
954 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
955 				      eth_802_3_cntrs_grp_data_layout,
956 				      a_mac_control_frames_received);
957 	ctrl_stats->UnsupportedOpcodesReceived =
958 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
959 				      eth_802_3_cntrs_grp_data_layout,
960 				      a_unsupported_opcodes_received);
961 }
962 
963 #define PPORT_2863_OFF(c) \
964 	MLX5_BYTE_OFF(ppcnt_reg, \
965 		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
966 static const struct counter_desc pport_2863_stats_desc[] = {
967 	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
968 	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
969 	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
970 };
971 
972 #define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
973 
974 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
975 {
976 	return NUM_PPORT_2863_COUNTERS;
977 }
978 
979 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
980 {
981 	int i;
982 
983 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
984 		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
985 	return idx;
986 }
987 
988 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
989 {
990 	int i;
991 
992 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
993 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
994 						  pport_2863_stats_desc, i);
995 	return idx;
996 }
997 
998 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
999 {
1000 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1001 	struct mlx5_core_dev *mdev = priv->mdev;
1002 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1003 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1004 	void *out;
1005 
1006 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1007 	out = pstats->RFC_2863_counters;
1008 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
1009 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1010 }
1011 
1012 #define PPORT_2819_OFF(c) \
1013 	MLX5_BYTE_OFF(ppcnt_reg, \
1014 		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
1015 static const struct counter_desc pport_2819_stats_desc[] = {
1016 	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
1017 	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
1018 	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
1019 	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
1020 	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
1021 	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
1022 	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
1023 	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
1024 	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
1025 	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
1026 	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
1027 	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
1028 	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
1029 };
1030 
1031 #define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
1032 
1033 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
1034 {
1035 	return NUM_PPORT_2819_COUNTERS;
1036 }
1037 
1038 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
1039 {
1040 	int i;
1041 
1042 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
1043 		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
1044 	return idx;
1045 }
1046 
1047 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
1048 {
1049 	int i;
1050 
1051 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
1052 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
1053 						  pport_2819_stats_desc, i);
1054 	return idx;
1055 }
1056 
1057 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
1058 {
1059 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1060 	struct mlx5_core_dev *mdev = priv->mdev;
1061 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1062 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1063 	void *out;
1064 
1065 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1066 		return;
1067 
1068 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1069 	out = pstats->RFC_2819_counters;
1070 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
1071 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1072 }
1073 
1074 static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
1075 	{    0,    64 },
1076 	{   65,   127 },
1077 	{  128,   255 },
1078 	{  256,   511 },
1079 	{  512,  1023 },
1080 	{ 1024,  1518 },
1081 	{ 1519,  2047 },
1082 	{ 2048,  4095 },
1083 	{ 4096,  8191 },
1084 	{ 8192, 10239 },
1085 	{}
1086 };
1087 
1088 void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
1089 			  struct ethtool_rmon_stats *rmon,
1090 			  const struct ethtool_rmon_hist_range **ranges)
1091 {
1092 	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
1093 	struct mlx5_core_dev *mdev = priv->mdev;
1094 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1095 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1096 
1097 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1098 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
1099 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
1100 				 sz, MLX5_REG_PPCNT, 0, 0))
1101 		return;
1102 
1103 #define RD(name)						\
1104 	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
1105 			      eth_2819_cntrs_grp_data_layout,	\
1106 			      name)
1107 
1108 	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
1109 	rmon->fragments		= RD(ether_stats_fragments);
1110 	rmon->jabbers		= RD(ether_stats_jabbers);
1111 
1112 	rmon->hist[0]		= RD(ether_stats_pkts64octets);
1113 	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
1114 	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
1115 	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
1116 	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
1117 	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
1118 	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
1119 	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
1120 	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
1121 	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
1122 #undef RD
1123 
1124 	*ranges = mlx5e_rmon_ranges;
1125 }
1126 
1127 #define PPORT_PHY_STATISTICAL_OFF(c) \
1128 	MLX5_BYTE_OFF(ppcnt_reg, \
1129 		      counter_set.phys_layer_statistical_cntrs.c##_high)
1130 static const struct counter_desc pport_phy_statistical_stats_desc[] = {
1131 	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
1132 	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
1133 };
1134 
1135 static const struct counter_desc
1136 pport_phy_statistical_err_lanes_stats_desc[] = {
1137 	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
1138 	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
1139 	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
1140 	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
1141 };
1142 
1143 #define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
1144 	ARRAY_SIZE(pport_phy_statistical_stats_desc)
1145 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
1146 	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
1147 
1148 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
1149 {
1150 	struct mlx5_core_dev *mdev = priv->mdev;
1151 	int num_stats;
1152 
1153 	/* "1" for link_down_events special counter */
1154 	num_stats = 1;
1155 
1156 	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
1157 		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
1158 
1159 	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
1160 		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
1161 
1162 	return num_stats;
1163 }
1164 
1165 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
1166 {
1167 	struct mlx5_core_dev *mdev = priv->mdev;
1168 	int i;
1169 
1170 	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
1171 
1172 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1173 		return idx;
1174 
1175 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1176 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
1177 		       pport_phy_statistical_stats_desc[i].format);
1178 
1179 	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1180 		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1181 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1182 			       pport_phy_statistical_err_lanes_stats_desc[i].format);
1183 
1184 	return idx;
1185 }
1186 
1187 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
1188 {
1189 	struct mlx5_core_dev *mdev = priv->mdev;
1190 	int i;
1191 
1192 	/* link_down_events_phy has special handling since it is not stored in __be64 format */
1193 	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
1194 			       counter_set.phys_layer_cntrs.link_down_events);
1195 
1196 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1197 		return idx;
1198 
1199 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1200 		data[idx++] =
1201 			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
1202 					    pport_phy_statistical_stats_desc, i);
1203 
1204 	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1205 		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1206 			data[idx++] =
1207 				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
1208 						    pport_phy_statistical_err_lanes_stats_desc,
1209 						    i);
1210 	return idx;
1211 }
1212 
1213 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
1214 {
1215 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1216 	struct mlx5_core_dev *mdev = priv->mdev;
1217 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1218 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1219 	void *out;
1220 
1221 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1222 	out = pstats->phy_counters;
1223 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1224 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1225 
1226 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1227 		return;
1228 
1229 	out = pstats->phy_statistical_counters;
1230 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1231 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1232 }
1233 
1234 void mlx5e_get_link_ext_stats(struct net_device *dev,
1235 			      struct ethtool_link_ext_stats *stats)
1236 {
1237 	struct mlx5e_priv *priv = netdev_priv(dev);
1238 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1239 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1240 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1241 
1242 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1243 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1244 	mlx5_core_access_reg(priv->mdev, in, sz, out,
1245 			     MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);
1246 
1247 	stats->link_down_events = MLX5_GET(ppcnt_reg, out,
1248 					   counter_set.phys_layer_cntrs.link_down_events);
1249 }
1250 
1251 static int fec_num_lanes(struct mlx5_core_dev *dev)
1252 {
1253 	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1254 	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1255 	int err;
1256 
1257 	MLX5_SET(pmlp_reg, in, local_port, 1);
1258 	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
1259 				   MLX5_REG_PMLP, 0, 0);
1260 	if (err)
1261 		return 0;
1262 
1263 	return MLX5_GET(pmlp_reg, out, width);
1264 }
1265 
1266 static int fec_active_mode(struct mlx5_core_dev *mdev)
1267 {
1268 	unsigned long fec_active_long;
1269 	u32 fec_active;
1270 
1271 	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
1272 		return MLX5E_FEC_NOFEC;
1273 
1274 	fec_active_long = fec_active;
1275 	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
1276 }
1277 
1278 #define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
1279 	fec_stats->corrected_blocks.lanes[(idx)] = \
1280 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1281 				      fc_fec_corrected_blocks_lane##idx); \
1282 	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
1283 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1284 				      fc_fec_uncorrectable_blocks_lane##idx); \
1285 })
1286 
1287 static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
1288 			     u32 *ppcnt, u8 lanes)
1289 {
1290 	if (lanes > 3) { /* 4 lanes */
1291 		MLX5E_STATS_SET_FEC_BLOCK(3);
1292 		MLX5E_STATS_SET_FEC_BLOCK(2);
1293 	}
1294 	if (lanes > 1) /* 2 lanes */
1295 		MLX5E_STATS_SET_FEC_BLOCK(1);
1296 	if (lanes > 0) /* 1 lane */
1297 		MLX5E_STATS_SET_FEC_BLOCK(0);
1298 }
1299 
1300 static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
1301 {
1302 	fec_stats->corrected_blocks.total =
1303 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1304 				      rs_fec_corrected_blocks);
1305 	fec_stats->uncorrectable_blocks.total =
1306 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1307 				      rs_fec_uncorrectable_blocks);
1308 }
1309 
1310 static void fec_set_block_stats(struct mlx5e_priv *priv,
1311 				struct ethtool_fec_stats *fec_stats)
1312 {
1313 	struct mlx5_core_dev *mdev = priv->mdev;
1314 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1315 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1316 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1317 	int mode = fec_active_mode(mdev);
1318 
1319 	if (mode == MLX5E_FEC_NOFEC)
1320 		return;
1321 
1322 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1323 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1324 	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
1325 		return;
1326 
1327 	switch (mode) {
1328 	case MLX5E_FEC_RS_528_514:
1329 	case MLX5E_FEC_RS_544_514:
1330 	case MLX5E_FEC_LLRS_272_257_1:
1331 		fec_set_rs_stats(fec_stats, out);
1332 		return;
1333 	case MLX5E_FEC_FIRECODE:
1334 		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
1335 	}
1336 }
1337 
1338 static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1339 					 struct ethtool_fec_stats *fec_stats)
1340 {
1341 	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1342 	struct mlx5_core_dev *mdev = priv->mdev;
1343 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1344 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1345 
1346 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1347 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1348 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1349 				 sz, MLX5_REG_PPCNT, 0, 0))
1350 		return;
1351 
1352 	fec_stats->corrected_bits.total =
1353 		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1354 				      phys_layer_statistical_cntrs,
1355 				      phy_corrected_bits);
1356 }
1357 
1358 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1359 			 struct ethtool_fec_stats *fec_stats)
1360 {
1361 	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
1362 		return;
1363 
1364 	fec_set_corrected_bits_total(priv, fec_stats);
1365 	fec_set_block_stats(priv, fec_stats);
1366 }
1367 
1368 #define PPORT_ETH_EXT_OFF(c) \
1369 	MLX5_BYTE_OFF(ppcnt_reg, \
1370 		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1371 static const struct counter_desc pport_eth_ext_stats_desc[] = {
1372 	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1373 };
1374 
1375 #define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)
1376 
1377 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1378 {
1379 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1380 		return NUM_PPORT_ETH_EXT_COUNTERS;
1381 
1382 	return 0;
1383 }
1384 
1385 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1386 {
1387 	int i;
1388 
1389 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1390 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1391 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1392 			       pport_eth_ext_stats_desc[i].format);
1393 	return idx;
1394 }
1395 
1396 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1397 {
1398 	int i;
1399 
1400 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1401 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1402 			data[idx++] =
1403 				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1404 						    pport_eth_ext_stats_desc, i);
1405 	return idx;
1406 }
1407 
1408 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1409 {
1410 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1411 	struct mlx5_core_dev *mdev = priv->mdev;
1412 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1413 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1414 	void *out;
1415 
1416 	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1417 		return;
1418 
1419 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1420 	out = pstats->eth_ext_counters;
1421 	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1422 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1423 }
1424 
1425 #define PCIE_PERF_OFF(c) \
1426 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1427 static const struct counter_desc pcie_perf_stats_desc[] = {
1428 	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1429 	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
1430 };
1431 
1432 #define PCIE_PERF_OFF64(c) \
1433 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1434 static const struct counter_desc pcie_perf_stats_desc64[] = {
1435 	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1436 };
1437 
1438 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1439 	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1440 	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1441 	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1442 	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1443 };
1444 
1445 #define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
1446 #define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
1447 #define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
1448 
1449 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1450 {
1451 	int num_stats = 0;
1452 
1453 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1454 		num_stats += NUM_PCIE_PERF_COUNTERS;
1455 
1456 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1457 		num_stats += NUM_PCIE_PERF_COUNTERS64;
1458 
1459 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1460 		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1461 
1462 	return num_stats;
1463 }
1464 
1465 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1466 {
1467 	int i;
1468 
1469 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1470 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1471 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1472 			       pcie_perf_stats_desc[i].format);
1473 
1474 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1475 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1476 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1477 			       pcie_perf_stats_desc64[i].format);
1478 
1479 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1480 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1481 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1482 			       pcie_perf_stall_stats_desc[i].format);
1483 	return idx;
1484 }
1485 
1486 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1487 {
1488 	int i;
1489 
1490 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1491 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1492 			data[idx++] =
1493 				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1494 						    pcie_perf_stats_desc, i);
1495 
1496 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1497 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1498 			data[idx++] =
1499 				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
1500 						    pcie_perf_stats_desc64, i);
1501 
1502 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1503 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1504 			data[idx++] =
1505 				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1506 						    pcie_perf_stall_stats_desc, i);
1507 	return idx;
1508 }
1509 
1510 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
1511 {
1512 	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1513 	struct mlx5_core_dev *mdev = priv->mdev;
1514 	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1515 	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1516 	void *out;
1517 
1518 	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1519 		return;
1520 
1521 	out = pcie_stats->pcie_perf_counters;
1522 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1523 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
1524 }
1525 
1526 #define PPORT_PER_TC_PRIO_OFF(c) \
1527 	MLX5_BYTE_OFF(ppcnt_reg, \
1528 		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1529 
1530 static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1531 	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1532 };
1533 
1534 #define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1535 
1536 #define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1537 	MLX5_BYTE_OFF(ppcnt_reg, \
1538 		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1539 
1540 static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1541 	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1542 	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1543 };
1544 
1545 #define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1546 	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1547 
1548 static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1549 {
1550 	struct mlx5_core_dev *mdev = priv->mdev;
1551 
1552 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1553 		return 0;
1554 
1555 	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1556 }
1557 
1558 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1559 {
1560 	struct mlx5_core_dev *mdev = priv->mdev;
1561 	int i, prio;
1562 
1563 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1564 		return idx;
1565 
1566 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1567 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1568 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1569 				pport_per_tc_prio_stats_desc[i].format, prio);
1570 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1571 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1572 				pport_per_tc_congest_prio_stats_desc[i].format, prio);
1573 	}
1574 
1575 	return idx;
1576 }
1577 
1578 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1579 {
1580 	struct mlx5e_pport_stats *pport = &priv->stats.pport;
1581 	struct mlx5_core_dev *mdev = priv->mdev;
1582 	int i, prio;
1583 
1584 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1585 		return idx;
1586 
1587 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1588 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1589 			data[idx++] =
1590 				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1591 						    pport_per_tc_prio_stats_desc, i);
1592 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
1593 			data[idx++] =
1594 				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1595 						    pport_per_tc_congest_prio_stats_desc, i);
1596 	}
1597 
1598 	return idx;
1599 }
1600 
1601 static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1602 {
1603 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1604 	struct mlx5_core_dev *mdev = priv->mdev;
1605 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1606 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1607 	void *out;
1608 	int prio;
1609 
1610 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1611 		return;
1612 
1613 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1614 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1615 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1616 		out = pstats->per_tc_prio_counters[prio];
1617 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1618 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1619 	}
1620 }
1621 
1622 static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1623 {
1624 	struct mlx5_core_dev *mdev = priv->mdev;
1625 
1626 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1627 		return 0;
1628 
1629 	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1630 }
1631 
1632 static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1633 {
1634 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1635 	struct mlx5_core_dev *mdev = priv->mdev;
1636 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1637 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1638 	void *out;
1639 	int prio;
1640 
1641 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1642 		return;
1643 
1644 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1645 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1646 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1647 		out = pstats->per_tc_congest_prio_counters[prio];
1648 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1649 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1650 	}
1651 }
1652 
1653 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1654 {
1655 	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1656 		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1657 }
1658 
1659 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1660 {
1661 	mlx5e_grp_per_tc_prio_update_stats(priv);
1662 	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1663 }
1664 
1665 #define PPORT_PER_PRIO_OFF(c) \
1666 	MLX5_BYTE_OFF(ppcnt_reg, \
1667 		      counter_set.eth_per_prio_grp_data_layout.c##_high)
1668 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1669 	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1670 	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1671 	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
1672 	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1673 	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1674 };
1675 
1676 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1677 
1678 static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1679 {
1680 	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1681 }
1682 
1683 static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1684 						   u8 *data,
1685 						   int idx)
1686 {
1687 	int i, prio;
1688 
1689 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1690 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1691 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1692 				pport_per_prio_traffic_stats_desc[i].format, prio);
1693 	}
1694 
1695 	return idx;
1696 }
1697 
1698 static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1699 						 u64 *data,
1700 						 int idx)
1701 {
1702 	int i, prio;
1703 
1704 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1705 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1706 			data[idx++] =
1707 				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1708 						    pport_per_prio_traffic_stats_desc, i);
1709 	}
1710 
1711 	return idx;
1712 }
1713 
1714 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1715 	/* %s is "global" or "prio{i}" */
1716 	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1717 	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1718 	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1719 	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1720 	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1721 };
1722 
1723 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1724 	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1725 	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1726 };
1727 
1728 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
1729 #define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1730 						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1731 						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1732 
1733 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1734 {
1735 	struct mlx5_core_dev *mdev = priv->mdev;
1736 	u8 pfc_en_tx;
1737 	u8 pfc_en_rx;
1738 	int err;
1739 
1740 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1741 		return 0;
1742 
1743 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1744 
1745 	return err ? 0 : pfc_en_tx | pfc_en_rx;
1746 }
1747 
1748 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1749 {
1750 	struct mlx5_core_dev *mdev = priv->mdev;
1751 	u32 rx_pause;
1752 	u32 tx_pause;
1753 	int err;
1754 
1755 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1756 		return false;
1757 
1758 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1759 
1760 	return err ? false : rx_pause | tx_pause;
1761 }
1762 
1763 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1764 {
1765 	return (mlx5e_query_global_pause_combined(priv) +
1766 		hweight8(mlx5e_query_pfc_combined(priv))) *
1767 		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1768 		NUM_PPORT_PFC_STALL_COUNTERS(priv);
1769 }
1770 
1771 static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1772 					       u8 *data,
1773 					       int idx)
1774 {
1775 	unsigned long pfc_combined;
1776 	int i, prio;
1777 
1778 	pfc_combined = mlx5e_query_pfc_combined(priv);
1779 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1780 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1781 			char pfc_string[ETH_GSTRING_LEN];
1782 
1783 			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1784 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1785 				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
1786 		}
1787 	}
1788 
1789 	if (mlx5e_query_global_pause_combined(priv)) {
1790 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1791 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1792 				pport_per_prio_pfc_stats_desc[i].format, "global");
1793 		}
1794 	}
1795 
1796 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1797 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
1798 		       pport_pfc_stall_stats_desc[i].format);
1799 
1800 	return idx;
1801 }
1802 
1803 static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1804 					     u64 *data,
1805 					     int idx)
1806 {
1807 	unsigned long pfc_combined;
1808 	int i, prio;
1809 
1810 	pfc_combined = mlx5e_query_pfc_combined(priv);
1811 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1812 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1813 			data[idx++] =
1814 				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1815 						    pport_per_prio_pfc_stats_desc, i);
1816 		}
1817 	}
1818 
1819 	if (mlx5e_query_global_pause_combined(priv)) {
1820 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1821 			data[idx++] =
1822 				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1823 						    pport_per_prio_pfc_stats_desc, i);
1824 		}
1825 	}
1826 
1827 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1828 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1829 						  pport_pfc_stall_stats_desc, i);
1830 
1831 	return idx;
1832 }
1833 
1834 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1835 {
1836 	return mlx5e_grp_per_prio_traffic_get_num_stats() +
1837 		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1838 }
1839 
1840 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
1841 {
1842 	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1843 	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
1844 	return idx;
1845 }
1846 
1847 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
1848 {
1849 	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1850 	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
1851 	return idx;
1852 }
1853 
1854 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
1855 {
1856 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1857 	struct mlx5_core_dev *mdev = priv->mdev;
1858 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1859 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1860 	int prio;
1861 	void *out;
1862 
1863 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1864 		return;
1865 
1866 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1867 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1868 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1869 		out = pstats->per_prio_counters[prio];
1870 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1871 		mlx5_core_access_reg(mdev, in, sz, out, sz,
1872 				     MLX5_REG_PPCNT, 0, 0);
1873 	}
1874 }
1875 
1876 static const struct counter_desc mlx5e_pme_status_desc[] = {
1877 	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
1878 };
1879 
1880 static const struct counter_desc mlx5e_pme_error_desc[] = {
1881 	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1882 	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1883 	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
1884 };
1885 
1886 #define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
1887 #define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
1888 
1889 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
1890 {
1891 	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
1892 }
1893 
1894 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
1895 {
1896 	int i;
1897 
1898 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1899 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1900 
1901 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
1902 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1903 
1904 	return idx;
1905 }
1906 
1907 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
1908 {
1909 	struct mlx5_pme_stats pme_stats;
1910 	int i;
1911 
1912 	mlx5_get_pme_stats(priv->mdev, &pme_stats);
1913 
1914 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1915 		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1916 						   mlx5e_pme_status_desc, i);
1917 
1918 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
1919 		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1920 						   mlx5e_pme_error_desc, i);
1921 
1922 	return idx;
1923 }
1924 
1925 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1926 
1927 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
1928 {
1929 	return mlx5e_ktls_get_count(priv);
1930 }
1931 
1932 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
1933 {
1934 	return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
1935 }
1936 
1937 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
1938 {
1939 	return idx + mlx5e_ktls_get_stats(priv, data + idx);
1940 }
1941 
1942 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1943 
1944 static const struct counter_desc rq_stats_desc[] = {
1945 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1946 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
1947 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
1948 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1949 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1950 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1951 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1952 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
1953 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
1954 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1955 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
1956 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
1957 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
1958 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
1959 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
1960 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
1961 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
1962 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
1963 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1964 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
1965 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1966 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1967 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1968 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1969 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1970 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1971 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
1972 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
1973 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
1974 #ifdef CONFIG_PAGE_POOL_STATS
1975 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
1976 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
1977 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
1978 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
1979 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
1980 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
1981 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
1982 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
1983 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
1984 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
1985 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
1986 #endif
1987 #ifdef CONFIG_MLX5_EN_TLS
1988 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
1989 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
1990 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
1991 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
1992 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
1993 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
1994 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
1995 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
1996 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
1997 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
1998 #endif
1999 };
2000 
2001 static const struct counter_desc sq_stats_desc[] = {
2002 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
2003 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
2004 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2005 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2006 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2007 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2008 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2009 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2010 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2011 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
2012 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2013 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2014 #ifdef CONFIG_MLX5_EN_TLS
2015 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2016 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2017 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2018 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2019 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2020 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2021 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2022 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2023 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2024 #endif
2025 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2026 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
2027 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
2028 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2029 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
2030 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
2031 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
2032 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2033 };
2034 
2035 static const struct counter_desc rq_xdpsq_stats_desc[] = {
2036 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2037 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2038 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2039 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2040 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2041 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2042 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2043 };
2044 
2045 static const struct counter_desc xdpsq_stats_desc[] = {
2046 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2047 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2048 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2049 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2050 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2051 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2052 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2053 };
2054 
2055 static const struct counter_desc xskrq_stats_desc[] = {
2056 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
2057 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
2058 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2059 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2060 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2061 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
2062 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2063 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2064 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2065 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2066 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2067 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2068 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2069 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2070 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2071 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2072 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2073 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2074 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
2075 };
2076 
2077 static const struct counter_desc xsksq_stats_desc[] = {
2078 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2079 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2080 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2081 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2082 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2083 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2084 };
2085 
2086 static const struct counter_desc ch_stats_desc[] = {
2087 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
2088 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
2089 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
2090 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
2091 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
2092 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2093 };
2094 
2095 static const struct counter_desc ptp_sq_stats_desc[] = {
2096 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
2097 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
2098 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2099 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2100 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2101 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
2102 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2103 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
2104 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
2105 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2106 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
2107 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2108 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
2109 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2110 };
2111 
2112 static const struct counter_desc ptp_ch_stats_desc[] = {
2113 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
2114 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
2115 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
2116 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2117 };
2118 
2119 static const struct counter_desc ptp_cq_stats_desc[] = {
2120 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
2121 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
2122 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
2123 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
2124 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
2125 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
2126 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
2127 };
2128 
2129 static const struct counter_desc ptp_rq_stats_desc[] = {
2130 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
2131 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
2132 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2133 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2134 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2135 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2136 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2137 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
2138 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2139 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2140 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
2141 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
2142 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2143 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2144 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2145 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2146 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2147 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2148 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2149 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2150 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2151 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2152 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
2153 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
2154 };
2155 
2156 static const struct counter_desc qos_sq_stats_desc[] = {
2157 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
2158 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
2159 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2160 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2161 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2162 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2163 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2164 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2165 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2166 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
2167 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2168 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2169 #ifdef CONFIG_MLX5_EN_TLS
2170 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2171 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2172 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2173 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2174 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2175 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2176 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2177 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2178 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2179 #endif
2180 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2181 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
2182 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
2183 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2184 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
2185 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
2186 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
2187 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2188 };
2189 
2190 #define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
2191 #define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
2192 #define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
2193 #define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
2194 #define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
2195 #define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
2196 #define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
2197 #define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
2198 #define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
2199 #define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
2200 #define NUM_PTP_RQ_STATS                ARRAY_SIZE(ptp_rq_stats_desc)
2201 #define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)
2202 
2203 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
2204 {
2205 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2206 	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
2207 }
2208 
2209 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2210 {
2211 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2212 	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2213 	int i, qid;
2214 
2215 	for (qid = 0; qid < max_qos_sqs; qid++)
2216 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2217 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2218 				qos_sq_stats_desc[i].format, qid);
2219 
2220 	return idx;
2221 }
2222 
2223 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
2224 {
2225 	struct mlx5e_sq_stats **stats;
2226 	u16 max_qos_sqs;
2227 	int i, qid;
2228 
2229 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2230 	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2231 	stats = READ_ONCE(priv->htb_qos_sq_stats);
2232 
2233 	for (qid = 0; qid < max_qos_sqs; qid++) {
2234 		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2235 
2236 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2237 			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
2238 	}
2239 
2240 	return idx;
2241 }
2242 
2243 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
2244 
2245 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2246 {
2247 	int num = NUM_PTP_CH_STATS;
2248 
2249 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2250 		return 0;
2251 
2252 	if (priv->tx_ptp_opened)
2253 		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2254 	if (priv->rx_ptp_opened)
2255 		num += NUM_PTP_RQ_STATS;
2256 
2257 	return num;
2258 }
2259 
2260 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2261 {
2262 	int i, tc;
2263 
2264 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2265 		return idx;
2266 
2267 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2268 		sprintf(data + (idx++) * ETH_GSTRING_LEN,
2269 			"%s", ptp_ch_stats_desc[i].format);
2270 
2271 	if (priv->tx_ptp_opened) {
2272 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2273 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2274 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
2275 					ptp_sq_stats_desc[i].format, tc);
2276 
2277 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2278 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2279 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
2280 					ptp_cq_stats_desc[i].format, tc);
2281 	}
2282 	if (priv->rx_ptp_opened) {
2283 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2284 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2285 				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
2286 	}
2287 	return idx;
2288 }
2289 
2290 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
2291 {
2292 	int i, tc;
2293 
2294 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2295 		return idx;
2296 
2297 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2298 		data[idx++] =
2299 			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
2300 					     ptp_ch_stats_desc, i);
2301 
2302 	if (priv->tx_ptp_opened) {
2303 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2304 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2305 				data[idx++] =
2306 					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
2307 							     ptp_sq_stats_desc, i);
2308 
2309 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2310 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2311 				data[idx++] =
2312 					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
2313 							     ptp_cq_stats_desc, i);
2314 	}
2315 	if (priv->rx_ptp_opened) {
2316 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2317 			data[idx++] =
2318 				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2319 						     ptp_rq_stats_desc, i);
2320 	}
2321 	return idx;
2322 }
2323 
2324 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
2325 
2326 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
2327 {
2328 	int max_nch = priv->stats_nch;
2329 
2330 	return (NUM_RQ_STATS * max_nch) +
2331 	       (NUM_CH_STATS * max_nch) +
2332 	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2333 	       (NUM_RQ_XDPSQ_STATS * max_nch) +
2334 	       (NUM_XDPSQ_STATS * max_nch) +
2335 	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2336 	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
2337 }
2338 
2339 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
2340 {
2341 	bool is_xsk = priv->xsk.ever_used;
2342 	int max_nch = priv->stats_nch;
2343 	int i, j, tc;
2344 
2345 	for (i = 0; i < max_nch; i++)
2346 		for (j = 0; j < NUM_CH_STATS; j++)
2347 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2348 				ch_stats_desc[j].format, i);
2349 
2350 	for (i = 0; i < max_nch; i++) {
2351 		for (j = 0; j < NUM_RQ_STATS; j++)
2352 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2353 				rq_stats_desc[j].format, i);
2354 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2355 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2356 				xskrq_stats_desc[j].format, i);
2357 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2358 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2359 				rq_xdpsq_stats_desc[j].format, i);
2360 	}
2361 
2362 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2363 		for (i = 0; i < max_nch; i++)
2364 			for (j = 0; j < NUM_SQ_STATS; j++)
2365 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
2366 					sq_stats_desc[j].format,
2367 					i + tc * max_nch);
2368 
2369 	for (i = 0; i < max_nch; i++) {
2370 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2371 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2372 				xsksq_stats_desc[j].format, i);
2373 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2374 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2375 				xdpsq_stats_desc[j].format, i);
2376 	}
2377 
2378 	return idx;
2379 }
2380 
2381 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
2382 {
2383 	bool is_xsk = priv->xsk.ever_used;
2384 	int max_nch = priv->stats_nch;
2385 	int i, j, tc;
2386 
2387 	for (i = 0; i < max_nch; i++)
2388 		for (j = 0; j < NUM_CH_STATS; j++)
2389 			data[idx++] =
2390 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
2391 						     ch_stats_desc, j);
2392 
2393 	for (i = 0; i < max_nch; i++) {
2394 		for (j = 0; j < NUM_RQ_STATS; j++)
2395 			data[idx++] =
2396 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
2397 						     rq_stats_desc, j);
2398 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2399 			data[idx++] =
2400 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
2401 						     xskrq_stats_desc, j);
2402 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2403 			data[idx++] =
2404 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
2405 						     rq_xdpsq_stats_desc, j);
2406 	}
2407 
2408 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2409 		for (i = 0; i < max_nch; i++)
2410 			for (j = 0; j < NUM_SQ_STATS; j++)
2411 				data[idx++] =
2412 					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
2413 							     sq_stats_desc, j);
2414 
2415 	for (i = 0; i < max_nch; i++) {
2416 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2417 			data[idx++] =
2418 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
2419 						     xsksq_stats_desc, j);
2420 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2421 			data[idx++] =
2422 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
2423 						     xdpsq_stats_desc, j);
2424 	}
2425 
2426 	return idx;
2427 }
2428 
2429 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2430 
2431 MLX5E_DEFINE_STATS_GRP(sw, 0);
2432 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2433 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2434 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2435 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2436 MLX5E_DEFINE_STATS_GRP(2863, 0);
2437 MLX5E_DEFINE_STATS_GRP(2819, 0);
2438 MLX5E_DEFINE_STATS_GRP(phy, 0);
2439 MLX5E_DEFINE_STATS_GRP(pcie, 0);
2440 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2441 MLX5E_DEFINE_STATS_GRP(pme, 0);
2442 MLX5E_DEFINE_STATS_GRP(channels, 0);
2443 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2444 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2445 static MLX5E_DEFINE_STATS_GRP(tls, 0);
2446 MLX5E_DEFINE_STATS_GRP(ptp, 0);
2447 static MLX5E_DEFINE_STATS_GRP(qos, 0);
2448 
2449 /* The stats groups order is opposite to the update_stats() order calls */
2450 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2451 	&MLX5E_STATS_GRP(sw),
2452 	&MLX5E_STATS_GRP(qcnt),
2453 	&MLX5E_STATS_GRP(vnic_env),
2454 	&MLX5E_STATS_GRP(vport),
2455 	&MLX5E_STATS_GRP(802_3),
2456 	&MLX5E_STATS_GRP(2863),
2457 	&MLX5E_STATS_GRP(2819),
2458 	&MLX5E_STATS_GRP(phy),
2459 	&MLX5E_STATS_GRP(eth_ext),
2460 	&MLX5E_STATS_GRP(pcie),
2461 	&MLX5E_STATS_GRP(per_prio),
2462 	&MLX5E_STATS_GRP(pme),
2463 #ifdef CONFIG_MLX5_EN_IPSEC
2464 	&MLX5E_STATS_GRP(ipsec_hw),
2465 	&MLX5E_STATS_GRP(ipsec_sw),
2466 #endif
2467 	&MLX5E_STATS_GRP(tls),
2468 	&MLX5E_STATS_GRP(channels),
2469 	&MLX5E_STATS_GRP(per_port_buff_congest),
2470 	&MLX5E_STATS_GRP(ptp),
2471 	&MLX5E_STATS_GRP(qos),
2472 #ifdef CONFIG_MLX5_EN_MACSEC
2473 	&MLX5E_STATS_GRP(macsec_hw),
2474 #endif
2475 };
2476 
2477 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2478 {
2479 	return ARRAY_SIZE(mlx5e_nic_stats_grps);
2480 }
2481