/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"

static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}

/* Concrete NIC Stats */

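/* Each counter_desc entry pairs the ethtool string reported to userspace
 * with the offset of the matching field in struct mlx5e_sw_stats (via
 * MLX5E_DECLARE_STAT). The sw group ops below simply walk this table:
 * get_num_stats() returns its size, fill_strings() copies the formats and
 * fill_stats() reads the fields at the recorded offsets.
 */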
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS		ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops += xdpsq_red_stats->nops;
	s->tx_xdp_full += xdpsq_red_stats->full;
	s->tx_xdp_err += xdpsq_red_stats->err;
	s->tx_xdp_cqes += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops += xdpsq_stats->nops;
	s->rx_xdp_tx_full += xdpsq_stats->full;
	s->rx_xdp_tx_err += xdpsq_stats->err;
	s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full += xsksq_stats->full;
	s->tx_xsk_err += xsksq_stats->err;
	s->tx_xsk_cqes += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets += xskrq_stats->packets;
	s->rx_xsk_bytes += xskrq_stats->bytes;
	s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
	s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets += rq_stats->packets;
	s->rx_bytes += rq_stats->bytes;
	s->rx_lro_packets += rq_stats->lro_packets;
	s->rx_lro_bytes += rq_stats->lro_bytes;
	s->rx_gro_packets += rq_stats->gro_packets;
	s->rx_gro_bytes += rq_stats->gro_bytes;
	s->rx_gro_skbs += rq_stats->gro_skbs;
	s->rx_gro_match_packets += rq_stats->gro_match_packets;
	s->rx_gro_large_hds += rq_stats->gro_large_hds;
	s->rx_ecn_mark += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
	s->rx_csum_none += rq_stats->csum_none;
	s->rx_csum_complete += rq_stats->csum_complete;
	s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop += rq_stats->xdp_drop;
	s->rx_xdp_redirect += rq_stats->xdp_redirect;
	s->rx_wqe_err += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
	s->rx_cache_reuse += rq_stats->cache_reuse;
	s->rx_cache_full += rq_stats->cache_full;
	s->rx_cache_empty += rq_stats->cache_empty;
	s->rx_cache_busy += rq_stats->cache_busy;
	s->rx_cache_waive += rq_stats->cache_waive;
	s->rx_congst_umr += rq_stats->congst_umr;
	s->rx_arfs_err += rq_stats->arfs_err;
	s->rx_recover += rq_stats->recover;
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
	s->rx_tls_err += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events += ch_stats->events;
	s->ch_poll += ch_stats->poll;
	s->ch_arm += ch_stats->arm;
	s->ch_aff_change += ch_stats->aff_change;
	s->ch_force_irq += ch_stats->force_irq;
	s->ch_eq_rearm += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets += sq_stats->packets;
	s->tx_bytes += sq_stats->bytes;
	s->tx_tso_packets += sq_stats->tso_packets;
	s->tx_tso_bytes += sq_stats->tso_bytes;
	s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
	s->tx_nop += sq_stats->nop;
	s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped += sq_stats->stopped;
	s->tx_queue_wake += sq_stats->wake;
	s->tx_queue_dropped += sq_stats->dropped;
	s->tx_cqe_err += sq_stats->cqe_err;
	s->tx_recover += sq_stats->recover;
	s->tx_xmit_more += sq_stats->xmit_more;
	s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
	s->tx_csum_none += sq_stats->csum_none;
	s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
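	/* The acquire orders the read of max_qos_sqs before the reads of the
	 * per-SQ stats pointers, so every entry up to max_qos_sqs is
	 * guaranteed to have been published before it is dereferenced below.
	 */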
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];
		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS		ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS	ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS	ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

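/* The PPORT groups below are read straight from the PPCNT register output.
 * Each 64-bit counter is described in the register layout as a
 * <name>_high/<name>_low pair of 32-bit fields, so the *_OFF() macros record
 * the offset of the _high half and MLX5E_READ_CTR64_BE() (and
 * MLX5E_READ_CTR64_BE_F() below) read the pair as one big-endian 64-bit value.
 */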
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define MLX5E_READ_CTR64_BE_F(ptr, set, c) \
	be64_to_cpu(*(__be64 *)((char *)ptr + \
		MLX5_BYTE_OFF(ppcnt_reg, \
			      counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \
			      eth_802_3_cntrs_grp_data_layout, \
			      name)

	mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 8191 },
	{ 8192, 10239 },
	{}
};

void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \
			      eth_2819_cntrs_grp_data_layout, \
			      name)

	rmon->undersize_pkts = RD(ether_stats_undersize_pkts);
	rmon->fragments = RD(ether_stats_fragments);
	rmon->jabbers = RD(ether_stats_jabbers);

	rmon->hist[0] = RD(ether_stats_pkts64octets);
	rmon->hist[1] = RD(ether_stats_pkts65to127octets);
	rmon->hist[2] = RD(ether_stats_pkts128to255octets);
	rmon->hist[3] = RD(ether_stats_pkts256to511octets);
	rmon->hist[4] = RD(ether_stats_pkts512to1023octets);
	rmon->hist[5] = RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6] = RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7] = RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8] = RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9] = RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}

#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

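/* The basic physical-layer group is always queried; the statistical group
 * (symbol errors, corrected bits, per-lane counters) is only read when the
 * PCAM reports ppcnt_statistical_group support.
 */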
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

static int fec_num_lanes(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	int err;

	MLX5_SET(pmlp_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMLP, 0, 0);
	if (err)
		return 0;

	return MLX5_GET(pmlp_reg, out, width);
}

static int fec_active_mode(struct mlx5_core_dev *mdev)
{
	unsigned long fec_active_long;
	u32 fec_active;

	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
		return MLX5E_FEC_NOFEC;

	fec_active_long = fec_active;
	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
}

#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
	fec_stats->corrected_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_corrected_blocks_lane##idx); \
	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_uncorrectable_blocks_lane##idx); \
})

static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
			     u32 *ppcnt, u8 lanes)
{
	if (lanes > 3) { /* 4 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(3);
		MLX5E_STATS_SET_FEC_BLOCK(2);
	}
	if (lanes > 1) /* 2 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(1);
	if (lanes > 0) /* 1 lane */
		MLX5E_STATS_SET_FEC_BLOCK(0);
}

static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
{
	fec_stats->corrected_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_corrected_blocks);
	fec_stats->uncorrectable_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_uncorrectable_blocks);
}

static void fec_set_block_stats(struct mlx5e_priv *priv,
				struct ethtool_fec_stats *fec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int mode = fec_active_mode(mdev);

	if (mode == MLX5E_FEC_NOFEC)
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
		return;

	switch (mode) {
	case MLX5E_FEC_RS_528_514:
	case MLX5E_FEC_RS_544_514:
	case MLX5E_FEC_LLRS_272_257_1:
		fec_set_rs_stats(fec_stats, out);
		return;
	case MLX5E_FEC_FIRECODE:
		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
	}
}

static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
					 struct ethtool_fec_stats *fec_stats)
{
	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

	fec_stats->corrected_bits.total =
		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
				      phys_layer_statistical_cntrs,
				      phy_corrected_bits);
}

void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats)
{
	fec_set_corrected_bits_total(priv, fec_stats);
	fec_set_block_stats(priv, fec_stats);
}

#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
	       mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS	ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
					    MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
					    MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
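
/*
 * NUM_PPORT_PFC_STALL_COUNTERS() multiplies the descriptor count by the
 * pfcc_mask and stall_detect capability bits, so it evaluates to the full
 * table size only when the device can report pause-storm/stall events, and
 * to zero otherwise, which hides these counters from ethtool.
 */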

static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
	       mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}
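
/*
 * The per-priority PPCNT counter set is read one priority at a time:
 * prio_tc selects the priority in the access register, and each result
 * lands in the matching per_prio_counters[] slot used by the fill
 * callbacks above.
 */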

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS	ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS	ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_tls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }

static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
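
/*
 * The RQ-owned XDP SQ (rq_xdpsq above) and the channel's standalone XDP SQ
 * (xdpsq below) report the same mlx5e_xdpsq_stats counter set; only the
 * string prefix produced by the MLX5E_DECLARE_*_STAT() macros differs.
 */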

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};

static const struct counter_desc ptp_rq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};

static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS		ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS		ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS		ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS	ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS		ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS		ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS		ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS	ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS	ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS	ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS	ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS	ARRAY_SIZE(qos_sq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				qos_sq_stats_desc[i].format, qid);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	int num = NUM_PTP_CH_STATS;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return 0;

	if (priv->tx_ptp_opened)
		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
	if (priv->rx_ptp_opened)
		num += NUM_PTP_RQ_STATS;

	return num;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			"%s", ptp_ch_stats_desc[i].format);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_sq_stats_desc[i].format, tc);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_cq_stats_desc[i].format, tc);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
	}
	return idx;
}
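
/*
 * Values for the PTP group must follow the same layout as the strings
 * above: channel stats first, then per-TC SQ and CQ stats if the PTP TX
 * channel was ever opened, and finally the PTP RQ stats if the PTP RX
 * channel was opened.
 */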

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
							     ptp_sq_stats_desc, i);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
							     ptp_cq_stats_desc, i);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
						     ptp_rq_stats_desc, i);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->stats_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}
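
/*
 * Values must be emitted in exactly the order used for the strings above:
 * per-channel event counters, then the RQ/XSK-RQ/RQ-XDPSQ blocks for each
 * channel, the per-TC SQ blocks, and finally the XSK-SQ and XDP-SQ blocks.
 */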

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }

MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);

/* The stats groups are listed in the opposite order to the update_stats() calls */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
};

unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}