/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/rep/bridge.h"
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/vxlan.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
#include "en/ptp.h"
#include "en/fs_ethtool.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strscpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};
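/* Two ethtool stats groups are exposed below: "sw_rep" folds the per-ring
 * software counters of the representor netdev, while "vport_rep" reports the
 * FW per-vport counters of the represented function (with TX/RX flipped to
 * the switch side, see the vport_rep update_stats callback).
 */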
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       sw_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_rep_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
						   vport_rep_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		mlx5e_stats_fill_strings(priv, data);
		break;
	}
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return mlx5e_stats_total_num(priv);
	default:
		return -EOPNOTSUPP;
	}
}
static void
mlx5e_rep_get_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}

static int
mlx5e_rep_set_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_channels(priv, ch);
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo = mlx5e_rep_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx5e_rep_get_strings,
	.get_sset_count = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam = mlx5e_rep_get_ringparam,
	.set_ringparam = mlx5e_rep_set_ringparam,
	.get_channels = mlx5e_rep_get_channels,
	.set_channels = mlx5e_rep_set_channels,
	.get_coalesce = mlx5e_rep_get_coalesce,
	.set_coalesce = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		if (rep_sq->send_to_vport_rule_peer)
			mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
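/* Install an eswitch "send to vport" (re-inject) rule for each of the
 * representor's SQs so that traffic transmitted on them reaches the
 * represented vport. When the eswitch is paired with a peer eswitch
 * (devcom), a matching rule is installed on the peer as well.
 */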
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
						     MLX5_DEVCOM_ESW_OFFLOADS);

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

		if (peer_esw) {
			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
									rep, sqns_array[i]);
			if (IS_ERR(flow_rule)) {
				err = PTR_ERR(flow_rule);
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				kfree(rep_sq);
				goto out_err;
			}
			rep_sq->send_to_vport_rule_peer = flow_rule;
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}
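/* Collect the SQNs of all active channels (per-TC SQs, XDP SQs on the
 * uplink rep, PTP SQs when port TX timestamping is enabled) and point the
 * eswitch send-to-vport rules at them.
 */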
static int
mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool is_uplink_rep = mlx5e_is_uplink_rep(priv);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int n, tc, nch, num_sqs = 0;
	struct mlx5e_channel *c;
	int err = -ENOMEM;
	bool ptp_sq;
	u32 *sqs;

	ptp_sq = !!(priv->channels.ptp &&
		    MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
	nch = priv->channels.num + ptp_sq;
	/* +2 for the xdpsqs; these don't exist on the ptp channel, but will
	 * simply not be counted into num_sqs there.
	 */
	if (is_uplink_rep)
		sqs_per_channel += 2;

	sqs = kvcalloc(nch * sqs_per_channel, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;

		if (is_uplink_rep) {
			if (c->xdp)
				sqs[num_sqs++] = c->rq_xdpsq.sqn;

			sqs[num_sqs++] = c->xdpsq.sqn;
		}
	}
	if (ptp_sq) {
		struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

		for (tc = 0; tc < ptp_ch->num_tc; tc++)
			sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kvfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

static void
mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

static int
mlx5e_rep_add_meta_tunnel_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_group *g;

	g = esw->fdb_table.offloads.send_to_vport_meta_grp;
	if (!g)
		return 0;

	flow_rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, rep->vport);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	rpriv->send_to_vport_meta_rule = flow_rule;

	return 0;
}

static void
mlx5e_rep_del_meta_tunnel_rule(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (rpriv->send_to_vport_meta_rule)
		mlx5_eswitch_del_send_to_vport_meta_rule(rpriv->send_to_vport_meta_rule);
}

void mlx5e_rep_activate_channels(struct mlx5e_priv *priv)
{
	mlx5e_add_sqs_fwd_rules(priv);
	mlx5e_rep_add_meta_tunnel_rule(priv);
}

void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv)
{
	mlx5e_rep_del_meta_tunnel_rule(priv);
	mlx5e_remove_sqs_fwd_rules(priv);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *dev = priv->mdev;

	return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
}

static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	if (new_carrier) {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
		if (err)
			return err;
		netif_carrier_on(dev);
	} else {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
		if (err)
			return err;
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open = mlx5e_rep_open,
	.ndo_stop = mlx5e_rep_close,
	.ndo_start_xmit = mlx5e_xmit,
	.ndo_setup_tc = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64 = mlx5e_rep_get_stats,
	.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu = mlx5e_rep_change_mtu,
	.ndo_change_carrier = mlx5e_rep_change_carrier,
};

bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}

bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
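/* Note: the uplink rep reuses the NIC netdev together with its netdev_ops
 * (mlx5e_netdev_ops) and only changes profiles, hence the different
 * identity checks in the two helpers above.
 */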
/* One indirect TIR set for outer. Inner not supported in reps. */
#define REP_NUM_INDIR_TIRS MLX5E_NUM_INDIR_TIRS

static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
{
	int max_tir_num = 1 << MLX5_CAP_GEN(mdev, log_max_tir);
	int num_vports = mlx5_eswitch_get_total_vports(mdev);

	return (max_tir_num - mlx5e_get_pf_num_tirs(mdev)
		- (num_vports * REP_NUM_INDIR_TIRS)) / num_vports;
}

static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->sw_mtu = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->mqprio.num_tc = 1;
	params->tunneled_offload_en = false;
	if (rep->vport != MLX5_VPORT_UPLINK)
		params->vlan_strip_disable = true;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev,
				   struct mlx5_core_dev *mdev)
{
	SET_NETDEV_DEV(netdev, mdev->device);
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
	eth_hw_addr_random(netdev);
	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->watchdog_timeo = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features |= NETIF_F_HW_TC;
#endif
	netdev->hw_features |= NETIF_F_SG;
	netdev->hw_features |= NETIF_F_IP_CSUM;
	netdev->hw_features |= NETIF_F_IPV6_CSUM;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->hw_features |= NETIF_F_TSO;
	netdev->hw_features |= NETIF_F_TSO6;
	netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;
	netdev->features |= NETIF_F_NETNS_LOCAL;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}

static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);

	mlx5e_vxlan_set_netdev_info(priv);
	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);
	return 0;
}
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_fs_cleanup(priv->fs);
	mlx5e_ipsec_cleanup(priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int err;

	mlx5e_fs_set_ns(priv->fs,
			mlx5_get_flow_namespace(priv->mdev,
						MLX5_FLOW_NAMESPACE_KERNEL), false);

	/* The inner_ttc in the ttc params is intentionally not set */
	mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* To give the uplink rep TTC a lower level for chaining from root ft */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	mlx5e_fs_set_ttc(priv->fs, mlx5_create_ttc_table(priv->mdev, &ttc_params), false);
	if (IS_ERR(mlx5e_fs_get_ttc(priv->fs, false))) {
		err = PTR_ERR(mlx5e_fs_get_ttc(priv->fs, false));
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
			   err);
		return err;
	}
	return 0;
}

static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non-uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = mlx5_get_ttc_flow_table(mlx5e_fs_get_ttc(priv->fs, false));
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		rpriv->root_ft = NULL;
	}

	return err;
}

static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return;
	mlx5_destroy_flow_table(rpriv->root_ft);
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!rpriv->vport_rx_rule)
		return;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = NULL;
}
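/* Called on rep bond updates: the vport rx rule is dropped and, unless the
 * rep is being cleaned up, re-created against the current root_ft.
 */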
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
	rep_vport_rx_rule_destroy(priv);

	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	priv->rx_res = mlx5e_rx_res_alloc();
	if (!priv->rx_res) {
		err = -ENOMEM;
		goto err_free_fs;
	}

	mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_rx_res_free;
	}

	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
				priv->max_nch, priv->drop_rq.rqn,
				&priv->channels.params.packet_merge,
				priv->channels.params.num_channels);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_rx_res;

	err = mlx5e_create_rep_root_ft(priv);
	if (err)
		goto err_destroy_ttc_table;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_root_ft;

	mlx5e_ethtool_init_steering(priv->fs);

	return 0;

err_destroy_root_ft:
	mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_rx_res_free:
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
err_free_fs:
	mlx5e_fs_cleanup(priv->fs);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv->fs);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}

static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	int err;

	mlx5e_create_q_counters(priv);
	err = mlx5e_init_rep_rx(priv);
	if (err)
		goto err_destroy_q_counters;

	mlx5e_tc_int_port_init_rep_rx(priv);

	return 0;

err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	return err;
}

static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_int_port_cleanup_rep_rx(priv);
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}

static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}

static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto err_init_tx;
	}

	err = mlx5e_tc_ht_init(&rpriv->tc_ht);
	if (err)
		goto err_ht_init;

	return 0;

err_ht_init:
	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
err_init_tx:
	mlx5e_destroy_tises(priv);
	return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);

	mlx5e_destroy_tises(priv);
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_rep_neigh_init(rpriv);
}

static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_rep_neigh_cleanup(rpriv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}

static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
		return mlx5e_rep_tc_event_port_affinity(priv);

	return NOTIFY_DONE;
}

static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5e_rep_tc_enable(priv);

	if (MLX5_CAP_GEN(mdev, uplink_follow))
		mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
					      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
	mlx5_lag_add_netdev(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
	mlx5e_rep_neigh_init(rpriv);
	mlx5e_rep_bridge_init(priv);

	netdev->wanted_features |= NETIF_F_HW_TC;

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}
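/* Reverse of mlx5e_uplink_rep_enable(): detach the netdev under rtnl, then
 * tear down bridge offloads, neigh handling, DCB app entries, the event
 * notifier, rep TC state and the lag binding.
 */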
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_rep_bridge_cleanup(priv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	mlx5_vxlan_reset_to_default(mdev->vxlan);
}

static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the update_stats() call order */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};

static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}

/* The stats groups order is opposite to the update_stats() call order */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(ptp),
};

static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init = mlx5e_init_rep,
	.cleanup = mlx5e_cleanup_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_rep_tx,
	.enable = mlx5e_rep_enable,
	.disable = mlx5e_rep_disable,
	.update_rx = mlx5e_update_rep_rx,
	.update_stats = mlx5e_stats_update_ndo_stats,
	.rx_handlers = &mlx5e_rx_handlers_rep,
	.max_tc = 1,
	.stats_grps = mlx5e_rep_stats_grps,
	.stats_grps_num = mlx5e_rep_stats_grps_num,
	.max_nch_limit = mlx5e_rep_max_nch_limit,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init = mlx5e_init_ul_rep,
	.cleanup = mlx5e_cleanup_rep,
	.init_rx = mlx5e_init_ul_rep_rx,
	.cleanup_rx = mlx5e_cleanup_ul_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_rep_tx,
	.enable = mlx5e_uplink_rep_enable,
	.disable = mlx5e_uplink_rep_disable,
	.update_rx = mlx5e_update_rep_rx,
	.update_stats = mlx5e_stats_update_ndo_stats,
	.update_carrier = mlx5e_update_carrier,
	.rx_handlers = &mlx5e_rx_handlers_rep,
	.max_tc = MLX5E_MAX_NUM_TC,
	.stats_grps = mlx5e_ul_rep_stats_grps,
	.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
/* e-Switch vport representors */

static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct devlink_port *dl_port;
	int err;

	rpriv->netdev = priv->netdev;

	err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
					  rpriv);
	if (err)
		return err;

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, rpriv->netdev);

	return 0;
}

static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
	struct net_device *netdev = rpriv->netdev;
	struct devlink_port *dl_port;
	struct mlx5_core_dev *dev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	dev = priv->mdev;

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	mlx5e_netdev_attach_nic_profile(priv);
}

static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	profile = &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, netdev);
	return 0;

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}

static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	int err;

	rpriv = kvzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	if (rep->vport == MLX5_VPORT_UPLINK)
		err = mlx5e_vport_uplink_rep_load(dev, rep);
	else
		err = mlx5e_vport_vf_rep_load(dev, rep);

	if (err)
		kvfree(rpriv);

	return err;
}
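/* Unload mirrors the load path above; for the uplink rep this means
 * reverting the shared netdev back to the NIC profile instead of
 * unregistering it.
 */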
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	struct devlink_port *dl_port;
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kvfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		if (!rep_sq->send_to_vport_rule_peer)
			continue;
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		rep_sq->send_to_vport_rule_peer = NULL;
	}
}

static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep,
				      struct mlx5_eswitch *peer_esw)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		if (rep_sq->send_to_vport_rule_peer)
			continue;
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
		if (IS_ERR(flow_rule))
			goto err_out;
		rep_sq->send_to_vport_rule_peer = flow_rule;
	}

	return 0;
err_out:
	mlx5e_vport_rep_event_unpair(rep);
	return PTR_ERR(flow_rule);
}

static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 enum mlx5_switchdev_event event,
				 void *data)
{
	int err = 0;

	if (event == MLX5_SWITCHDEV_EVENT_PAIR)
		err = mlx5e_vport_rep_event_pair(esw, rep, data);
	else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
		mlx5e_vport_rep_event_unpair(rep);

	return err;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
	.event = mlx5e_vport_rep_event,
};

static int mlx5e_rep_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = edev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
	return 0;
}

static void mlx5e_rep_remove(struct auxiliary_device *adev)
{
	struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = vdev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}

static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);

static struct auxiliary_driver mlx5e_rep_driver = {
	.name = "eth-rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};

int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}
void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}