// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include <net/mana/mana.h>

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_ucast_pkts)},
	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_ucast_bytes)},
	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_bcast_pkts)},
	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_bcast_bytes)},
	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_mcast_pkts)},
	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_mcast_bytes)},
	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 tx_cqe_unknown_type)},
	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
				      rx_coalesced_err)},
	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 rx_cqe_unknown_type)},
};

static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + num_queues *
	       (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}

static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	u8 *p = data;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
		memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
	}

	for (i = 0; i < num_queues; i++) {
		sprintf(p, "rx_%d_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_drop", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_tx", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_redirect", i);
		p += ETH_GSTRING_LEN;
	}

	for (i = 0; i < num_queues; i++) {
		sprintf(p, "tx_%d_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_xdp_xmit", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_tso_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_tso_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_tso_inner_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_tso_inner_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_long_pkt_fmt", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_short_pkt_fmt", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_csum_partial", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_mana_map_err", i);
		p += ETH_GSTRING_LEN;
	}
}
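
/* Copy the driver stats into the ethtool data buffer in the same order
 * the stat strings were emitted: the global counters first, then the
 * per-RX-queue and per-TX-queue counters. Each queue's counters are
 * snapshotted under its u64_stats seqcount so the reported values are
 * self-consistent.
 */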
static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	if (!apc->port_is_up)
		return;
	/* Call the MANA function to refresh the global stats from GDMA */
	mana_query_gf_stats(apc);

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}

static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}

static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

static u32 mana_rss_indir_size(struct net_device *ndev)
{
	return MANA_INDIRECT_TABLE_SIZE;
}

static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (indir) {
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
			indir[i] = apc->indir_table[i];
	}

	if (key)
		memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}
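
/* Apply a new RSS hash key and/or indirection table. The current values
 * are saved first so they can be restored if mana_config_rss() fails to
 * apply the new configuration.
 */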
static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u32 save_table[MANA_INDIRECT_TABLE_SIZE];
	u8 save_key[MANA_HASH_KEY_SIZE];
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (indir) {
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
			if (indir[i] >= apc->num_queues)
				return -EINVAL;

		update_table = true;
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = indir[i];
		}
	}

	if (key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

	return err;
}

static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err, err2;

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		return err;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (!err)
		return 0;

	netdev_err(ndev, "mana_attach failed: %d\n", err);

	/* Try to roll it back to the old configuration. */
	apc->num_queues = old_count;
	err2 = mana_attach(ndev);
	if (err2)
		netdev_err(ndev, "mana re-attach failed: %d\n", err2);

	return err;
}

const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats = mana_get_ethtool_stats,
	.get_sset_count = mana_get_sset_count,
	.get_strings = mana_get_strings,
	.get_rxnfc = mana_get_rxnfc,
	.get_rxfh_key_size = mana_get_rxfh_key_size,
	.get_rxfh_indir_size = mana_rss_indir_size,
	.get_rxfh = mana_get_rxfh,
	.set_rxfh = mana_set_rxfh,
	.get_channels = mana_get_channels,
	.set_channels = mana_set_channels,
};