/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"

static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}
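/*
 * Explanatory note (not from the original source): each stats group
 * supplies get_num_stats/fill_strings/fill_stats plus an optional
 * update_stats callback, and the active profile publishes its groups
 * through priv->profile->stats_grps. update_stats is invoked in reverse
 * table order, while both fill callbacks run forward; ethtool pairs
 * strings with values purely by index, so the two fill paths must emit
 * entries in exactly the same order.
 */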
/* Concrete NIC Stats */

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS		ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}
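/*
 * Explanatory note (not from the original source): a counter_desc pairs
 * an ethtool format string with the byte offset of the counter inside
 * its stats struct, so MLX5E_READ_CTR64_CPU() above fetches values by
 * offset in the same order the strings were emitted. The helpers below
 * fold one queue's counters into the device-wide software aggregate.
 */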
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops += xdpsq_red_stats->nops;
	s->tx_xdp_full += xdpsq_red_stats->full;
	s->tx_xdp_err += xdpsq_red_stats->err;
	s->tx_xdp_cqes += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops += xdpsq_stats->nops;
	s->rx_xdp_tx_full += xdpsq_stats->full;
	s->rx_xdp_tx_err += xdpsq_stats->err;
	s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full += xsksq_stats->full;
	s->tx_xsk_err += xsksq_stats->err;
	s->tx_xsk_cqes += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets += xskrq_stats->packets;
	s->rx_xsk_bytes += xskrq_stats->bytes;
	s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
	s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets += rq_stats->packets;
	s->rx_bytes += rq_stats->bytes;
	s->rx_lro_packets += rq_stats->lro_packets;
	s->rx_lro_bytes += rq_stats->lro_bytes;
	s->rx_ecn_mark += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
	s->rx_csum_none += rq_stats->csum_none;
	s->rx_csum_complete += rq_stats->csum_complete;
	s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop += rq_stats->xdp_drop;
	s->rx_xdp_redirect += rq_stats->xdp_redirect;
	s->rx_wqe_err += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
	s->rx_cache_reuse += rq_stats->cache_reuse;
	s->rx_cache_full += rq_stats->cache_full;
	s->rx_cache_empty += rq_stats->cache_empty;
	s->rx_cache_busy += rq_stats->cache_busy;
	s->rx_cache_waive += rq_stats->cache_waive;
	s->rx_congst_umr += rq_stats->congst_umr;
	s->rx_arfs_err += rq_stats->arfs_err;
	s->rx_recover += rq_stats->recover;
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
	s->rx_tls_ctx += rq_stats->tls_ctx;
	s->rx_tls_del += rq_stats->tls_del;
	s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
	s->rx_tls_err += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events += ch_stats->events;
	s->ch_poll += ch_stats->poll;
	s->ch_arm += ch_stats->arm;
	s->ch_aff_change += ch_stats->aff_change;
	s->ch_force_irq += ch_stats->force_irq;
	s->ch_eq_rearm += ch_stats->eq_rearm;
}
static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets += sq_stats->packets;
	s->tx_bytes += sq_stats->bytes;
	s->tx_tso_packets += sq_stats->tso_packets;
	s->tx_tso_bytes += sq_stats->tso_bytes;
	s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
	s->tx_nop += sq_stats->nop;
	s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped += sq_stats->stopped;
	s->tx_queue_wake += sq_stats->wake;
	s->tx_queue_dropped += sq_stats->dropped;
	s->tx_cqe_err += sq_stats->cqe_err;
	s->tx_recover += sq_stats->recover;
	s->tx_xmit_more += sq_stats->xmit_more;
	s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
	s->tx_csum_none += sq_stats->csum_none;
	s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ctx += sq_stats->tls_ctx;
	s->tx_tls_ooo += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->port_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);

	for (i = 0; i < priv->max_opened_tc; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}
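/*
 * Explanatory note (not from the original source): the barrier() in the
 * loop above, and in the loops below, works around
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657, where GCC may
 * otherwise merge the inlined accumulation loops in a way that
 * pessimizes the generated code (see the bug report for details). The
 * QoS helper below reads htb.max_qos_sqs with acquire semantics so that
 * every stats struct published by mlx5e_open_qos_sq is visible before
 * it is dereferenced, allowing a lockless walk of the array.
 */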
static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}
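/*
 * Explanatory note (not from the original source): the software
 * aggregate is rebuilt from scratch on every update below: it is zeroed
 * and re-summed over all max_nch channels and max_opened_tc TCs, not
 * just the currently active ones, since per-channel stats persist
 * across channel close/reopen and must stay monotonic from the user's
 * point of view.
 */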
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS		ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS	ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}
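/*
 * Explanatory note (not from the original source): both queries below
 * read the same out_of_buffer field of the QUERY_Q_COUNTER output, just
 * against different counter set IDs. Packets that arrive while the
 * interface is down are steered to the drop RQ, so its out-of-buffer
 * count is what gets reported as rx_if_down_packets.
 */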
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS	ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}
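/*
 * Explanatory note (not from the original source): the port counter
 * groups below (IEEE 802.3, RFC 2863, RFC 2819, PHY, Ethernet extended,
 * per-prio and per-TC) all follow the same pattern: one PPCNT access
 * register read per group, selected via the "grp" field, with 64-bit
 * big-endian counters extracted by byte offset from the cached
 * snapshot.
 */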
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
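/*
 * Explanatory note (not from the original source): ethtool's standard
 * pause statistics below are served from a fresh PPCNT read on each
 * call rather than from the cached pstats snapshot, so they are up to
 * date even if the periodic stats update has not run recently.
 */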
#define MLX5E_READ_CTR64_BE_F(ptr, c)			\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
			     sz, MLX5_REG_PPCNT, 0, 0);

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      a_pause_mac_ctrl_frames_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS	ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS	ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
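/*
 * Explanatory note (not from the original source): PCIe counters below
 * live in the MPCNT register rather than PPCNT, and each sub-group
 * (performance, 64-bit overflow, outbound stall) is gated by its own
 * MCAM feature bit.
 */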
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}
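/*
 * Explanatory note (not from the original source): the per-TC and
 * per-TC-congestion port buffer counters below are only exposed when
 * the device implements the SBCAM register; both are read through
 * PPCNT with pnat = 2 and one register access per priority.
 */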
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
	       mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}
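/*
 * Explanatory note (not from the original source): in the per-priority
 * descriptors below, each format string carries a %d that is expanded
 * with the priority (0..NUM_PPORT_PRIO-1) when the ethtool strings are
 * generated, so one descriptor yields one counter per priority.
 */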
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}
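/*
 * Explanatory note (not from the original source): PFC counters are
 * exposed only for priorities on which PFC is enabled in either
 * direction (the bitmap returned above), and the "global" pause
 * counters only when global flow control is configured; the global
 * values are read from the priority-0 counter page.
 */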
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
	       mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_tls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }

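/* Software per-channel counters.  Each MLX5E_DECLARE_*_STAT() entry pairs a
 * printf-style format string (defined in en_stats.h, e.g. "rx%d_packets",
 * where the channel index is substituted when the ethtool strings are built)
 * with the field offset inside the corresponding per-ring stats structure,
 * which MLX5E_READ_CTR64_CPU uses when filling the values.
 */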
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

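/* Per-SQ (TXQ) counters.  A separate mlx5e_sq_stats instance exists for
 * every traffic class of every channel; the index printed into the ethtool
 * strings is i + tc * max_nch, matching the order in which the channels
 * group walks the queues further below.
 */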
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

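/* AF_XDP (XSK) queues reuse the regular RQ and XDP-SQ stats structures and
 * only differ in their ethtool name prefix.  They are counted in the
 * channels group only when priv->xsk.ever_used is set, which (as the name
 * suggests) appears intended to stay set once an XSK socket has been used,
 * keeping the exported string set stable afterwards.
 */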
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};

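/* HTB (tx QoS) send queues expose the same per-SQ counter set as regular
 * TXQs under a dedicated per-queue prefix (see MLX5E_DECLARE_QOS_TX_STAT in
 * en_stats.h).  The number of such queues is dynamic, so the group size is
 * derived from priv->htb.max_qos_sqs at query time.
 */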
static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				qos_sq_stats_desc[i].format, qid);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

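/* The PTP group is empty until the dedicated PTP channel has been opened
 * (priv->port_ptp_opened).  Once present it exposes one set of channel
 * counters plus per-TC SQ and CQ counters, replicated priv->max_opened_tc
 * times so that strings and values always line up.
 */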
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	return priv->port_ptp_opened ?
	       NUM_PTP_CH_STATS +
	       ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
	       0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->port_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			ptp_ch_stats_desc[i].format);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < NUM_PTP_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_sq_stats_desc[i].format, tc);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < NUM_PTP_CQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_cq_stats_desc[i].format, tc);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->port_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < NUM_PTP_SQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
						     ptp_sq_stats_desc, i);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < NUM_PTP_CQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
						     ptp_cq_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->max_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}

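/* The value pass below must walk the queues in exactly the same order as
 * the string pass above (ch, then rq/xskrq/rq_xdpsq per channel, then sq
 * per TC, then xsksq/xdpsq per channel); otherwise ethtool would pair
 * values with the wrong names.
 */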
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }

MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);

/* The stats groups order here is opposite to the order of the update_stats() calls */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
};

unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}