/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "esw/chains.h"
#include "en.h"
#include "en_rep.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "fs_core.h"
#include "lib/mlx5.h"
/* CREATE_TRACE_POINTS must be defined in exactly one TU before including
 * the tracepoint header so the tracepoint bodies are emitted here.
 */
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"

/* Representors use a smaller SQ than regular netdevs (but never below the
 * device minimum) and a single channel by default.
 */
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

/* ethtool -i for VF representors: driver name, kernel release as the
 * driver version, and the device FW revision/board id.
 */
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}

/* Uplink rep additionally reports the PCI bus address. */
static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
					 struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_rep_get_drvinfo(dev, drvinfo);
	strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
		sizeof(drvinfo->bus_info));
}

/* SW counters exposed to ethtool -S on a representor netdev. */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

/* Layout for the eswitch vport counters below; field names become the
 * ethtool stat strings via MLX5E_DECLARE_STAT().
 */
struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};
/* HW (eswitch vport) counters exposed to ethtool -S on a representor. */
static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       sw_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_rep_stats_desc, i);
	return idx;
}

/* Fold the per-ring SW stats into the few aggregate counters reps expose. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes   = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes   = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
						   vport_rep_stats_desc, i);
	return idx;
}

/* Refresh priv->stats.vf_vport from the eswitch vport counters.
 * Failure to read is logged and the cached stats are left untouched.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

/* ethtool_ops thin wrappers: unwrap netdev_priv() and delegate to the
 * shared mlx5e ethtool implementation.
 */
static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		mlx5e_stats_fill_strings(priv, data);
		break;
	}
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return mlx5e_stats_total_num(priv);
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_channels(priv, ch);
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

/* Pause and link-settings ops only make sense on the uplink (physical
 * port) representor, hence the separate uplink-only wrappers.
 */
static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

/* ethtool ops for VF representors. */
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

/* ethtool ops for the uplink representor: superset of the VF rep ops,
 * adding link/pause/RSS/rxnfc control of the physical port.
 */
static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_uplink_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
	.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
	.get_rxfh          = mlx5e_get_rxfh,
	.set_rxfh          = mlx5e_set_rxfh,
	.get_rxnfc         = mlx5e_get_rxnfc,
	.set_rxnfc         = mlx5e_set_rxnfc,
	.get_pauseparam    = mlx5e_uplink_rep_get_pauseparam,
	.set_pauseparam    = mlx5e_uplink_rep_set_pauseparam,
};

/* All reps of one eswitch share the NIC system image GUID as parent id,
 * which is what groups them into one switch for switchdev tooling.
 */
static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
					 struct netdev_phys_item_id *ppid)
{
	struct mlx5e_priv *priv;
	u64 parent_id;

	priv = netdev_priv(dev);

	parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
	ppid->id_len = sizeof(parent_id);
	memcpy(ppid->id, &parent_id, sizeof(parent_id));
}

/* Remove and free all send-to-vport steering rules of this rep. */
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

/* Install one send-to-vport rule per SQ so traffic transmitted on the
 * rep's SQs is re-injected to the represented vport.  On any failure the
 * rules added so far are torn down via mlx5e_sqs2vport_stop().
 */
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}

/* Collect the SQ numbers of all open channels/TCs and install the
 * corresponding send-to-vport forwarding rules.
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

/* ndo_open for a VF rep: open the channels, then bring the eswitch vport
 * admin state up; carrier is turned on only if that succeeds
 * (mlx5_modify_vport_admin_state() returns 0 on success).
 */
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

/* ndo_stop for a VF rep: admin-down the vport, then close the channels. */
static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

/* CPU-hit xstats: packets that missed HW offload and reached the host. */
static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* ndo_get_stats64: return the cached vport stats and kick an async
 * refresh so subsequent reads see fresher values.
 */
static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

/* The uplink rep also pushes the new MTU to the physical port. */
static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}

static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
{
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	return 0;
}

/* Legacy VF vlan config is not supported in switchdev mode; only vid 0
 * is accepted (as a no-op) for libvirt compatibility.
 */
static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
					__be16 vlan_proto)
{
	netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

	if (vlan != 0)
		return -EOPNOTSUPP;

	/* allow setting 0-vid for compatibility with libvirt */
	return 0;
}

static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	return &rpriv->dl_port;
}

/* ndo_change_carrier: mirror the requested carrier state into the
 * eswitch vport admin state before toggling the netdev carrier.
 */
static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	if (new_carrier) {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
		if (err)
			return err;
		netif_carrier_on(dev);
	} else {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
		if (err)
			return err;
		netif_carrier_off(dev);
	}
	return 0;
}

/* ndo table for VF representors. */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
	.ndo_change_carrier      = mlx5e_rep_change_carrier,
};

/* ndo table for the uplink representor; reuses the regular mlx5e
 * open/close/stats paths and adds VF config and tunnel-port hooks.
 */
static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_set_mac_address     = mlx5e_uplink_rep_set_mac,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_uplink_rep_change_mtu,
	.ndo_udp_tunnel_add      = udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del      = udp_tunnel_nic_del_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_set_vf_vlan         = mlx5e_uplink_rep_set_vf_vlan,
	.ndo_set_features        = mlx5e_set_features,
};

/* Rep type is identified by which ndo table the netdev carries. */
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
}

bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}

/* Fill priv->channels.params with representor defaults (small SQ for VF
 * reps, single TC, no tunnel offload, RSS sized to num_channels).
 */
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc                = 1;
	params->tunneled_offload_en = false;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

/* Wire up ndo/ethtool ops, MAC address and feature flags.  The uplink
 * rep keeps the persistent port MAC; VF reps get a random one.
 */
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;

	SET_NETDEV_DEV(netdev, mdev->device);
	if (rep->vport == MLX5_VPORT_UPLINK) {
		netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
		/* we want a persistent mac for the uplink rep */
		mlx5_query_mac_address(mdev, netdev->dev_addr);
		netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
		mlx5e_vxlan_set_netdev_info(priv);
		mlx5e_dcbnl_build_rep_netdev(netdev);
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_rep;
		eth_hw_addr_random(netdev);
		netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->features       |= NETIF_F_NETNS_LOCAL;

	netdev->hw_features    |= NETIF_F_HW_TC;
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	if (rep->vport == MLX5_VPORT_UPLINK)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	else
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	netdev->features |= netdev->hw_features;
}

/* Profile .init: base netdev init, then representor params/netdev setup. */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

	mlx5e_build_rep_params(netdev);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);

	return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_netdev_cleanup(priv->netdev, priv);
}

/* Create the rep's traffic-type classification (TTC) steering table. */
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
	mlx5e_set_ttc_ft_params(&ttc_params);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* To give uplink rep TTC a lower level for chaining from root ft */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
		return err;
	}
	return 0;
}

/* Root flow table of the rep's RX pipeline.  Non-uplink reps point their
 * root at their own TTC; the uplink rep gets an empty table whose miss
 * flow chains into ethtool/TTC tables.
 */
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = priv->fs.ttc.ft.t;
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		rpriv->root_ft = NULL;
	}

	return err;
}

static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	/* non-uplink reps borrowed the TTC table as root; nothing to free */
	if (rep->vport != MLX5_VPORT_UPLINK)
		return;
	mlx5_destroy_flow_table(rpriv->root_ft);
}

/* Steer traffic arriving from the rep's vport into its root flow table. */
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!rpriv->vport_rx_rule)
		return;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = NULL;
}

/* On bond change: drop the rx rule and, unless cleaning up, re-create it. */
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
	rep_vport_rx_rule_destroy(priv);

	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}

/* Profile .init_rx: build the whole RX pipeline (drop RQ, RQTs, TIRs,
 * TTC table, root FT, vport rx rule, ethtool steering).  Unwinds in
 * reverse order on failure.
 */
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, false);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_direct_tirs;

	err = mlx5e_create_rep_root_ft(priv);
	if (err)
		goto err_destroy_ttc_table;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_root_ft;

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_root_ft:
	mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
	return err;
}

/* Exact reverse of mlx5e_init_rep_rx(). */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
}

/* Uplink rep RX init additionally owns the queue counters. */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_create_q_counters(priv);
	return mlx5e_init_rep_rx(priv);
}

static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}

/* Uplink-only TX init: TC offload state, tunnel entropy, bond support
 * and the netdev-event notifier used for TC offloads.
 */
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}

/* Profile .init_tx: TISes for all reps, plus uplink-only TX state. */
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto destroy_tises;
	}

	return 0;

destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}

static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_destroy_tises(priv);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	mlx5e_set_netdev_mtu_boundaries(priv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}

/* Async event handler for the uplink rep: refresh carrier on port
 * up/down events and forward port-affinity events to the TC layer.
 */
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
		return mlx5e_rep_tc_event_port_affinity(priv);

	return NOTIFY_DONE;
}

/* Profile .enable for the uplink rep: MTU bounds from the port max, TC
 * offload enable, port admin auto, LAG, event notifier and DCBNL.
 */
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5e_rep_tc_enable(priv);

	mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
				      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
	mlx5_lag_add(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove(mdev);
}

static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};

static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
};

static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}

/* Profile for VF representor netdevs (continues past this view). */
static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx, 1172 .enable = mlx5e_rep_enable, 1173 .update_rx = mlx5e_update_rep_rx, 1174 .update_stats = mlx5e_update_ndo_stats, 1175 .rx_handlers = &mlx5e_rx_handlers_rep, 1176 .max_tc = 1, 1177 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), 1178 .stats_grps = mlx5e_rep_stats_grps, 1179 .stats_grps_num = mlx5e_rep_stats_grps_num, 1180 }; 1181 1182 static const struct mlx5e_profile mlx5e_uplink_rep_profile = { 1183 .init = mlx5e_init_rep, 1184 .cleanup = mlx5e_cleanup_rep, 1185 .init_rx = mlx5e_init_ul_rep_rx, 1186 .cleanup_rx = mlx5e_cleanup_ul_rep_rx, 1187 .init_tx = mlx5e_init_rep_tx, 1188 .cleanup_tx = mlx5e_cleanup_rep_tx, 1189 .enable = mlx5e_uplink_rep_enable, 1190 .disable = mlx5e_uplink_rep_disable, 1191 .update_rx = mlx5e_update_rep_rx, 1192 .update_stats = mlx5e_update_ndo_stats, 1193 .update_carrier = mlx5e_update_carrier, 1194 .rx_handlers = &mlx5e_rx_handlers_rep, 1195 .max_tc = MLX5E_MAX_NUM_TC, 1196 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), 1197 .stats_grps = mlx5e_ul_rep_stats_grps, 1198 .stats_grps_num = mlx5e_ul_rep_stats_grps_num, 1199 }; 1200 1201 static bool 1202 is_devlink_port_supported(const struct mlx5_core_dev *dev, 1203 const struct mlx5e_rep_priv *rpriv) 1204 { 1205 return rpriv->rep->vport == MLX5_VPORT_UPLINK || 1206 rpriv->rep->vport == MLX5_VPORT_PF || 1207 mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport); 1208 } 1209 1210 static int register_devlink_port(struct mlx5_core_dev *dev, 1211 struct mlx5e_rep_priv *rpriv) 1212 { 1213 struct devlink *devlink = priv_to_devlink(dev); 1214 struct mlx5_eswitch_rep *rep = rpriv->rep; 1215 struct devlink_port_attrs attrs = {}; 1216 struct netdev_phys_item_id ppid = {}; 1217 unsigned int dl_port_index = 0; 1218 u16 pfnum; 1219 1220 if (!is_devlink_port_supported(dev, rpriv)) 1221 return 0; 1222 1223 mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); 1224 dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport); 1225 pfnum = 
PCI_FUNC(dev->pdev->devfn); 1226 if (rep->vport == MLX5_VPORT_UPLINK) { 1227 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; 1228 attrs.phys.port_number = pfnum; 1229 memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len); 1230 attrs.switch_id.id_len = ppid.id_len; 1231 devlink_port_attrs_set(&rpriv->dl_port, &attrs); 1232 } else if (rep->vport == MLX5_VPORT_PF) { 1233 memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len); 1234 rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len; 1235 devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum); 1236 } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) { 1237 memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len); 1238 rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len; 1239 devlink_port_attrs_pci_vf_set(&rpriv->dl_port, 1240 pfnum, rep->vport - 1); 1241 } 1242 return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index); 1243 } 1244 1245 static void unregister_devlink_port(struct mlx5_core_dev *dev, 1246 struct mlx5e_rep_priv *rpriv) 1247 { 1248 if (is_devlink_port_supported(dev, rpriv)) 1249 devlink_port_unregister(&rpriv->dl_port); 1250 } 1251 1252 /* e-Switch vport representors */ 1253 static int 1254 mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) 1255 { 1256 const struct mlx5e_profile *profile; 1257 struct mlx5e_rep_priv *rpriv; 1258 struct net_device *netdev; 1259 int nch, err; 1260 1261 rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); 1262 if (!rpriv) 1263 return -ENOMEM; 1264 1265 /* rpriv->rep to be looked up when profile->init() is called */ 1266 rpriv->rep = rep; 1267 1268 nch = mlx5e_get_max_num_channels(dev); 1269 profile = (rep->vport == MLX5_VPORT_UPLINK) ? 
1270 &mlx5e_uplink_rep_profile : &mlx5e_rep_profile; 1271 netdev = mlx5e_create_netdev(dev, profile, nch, rpriv); 1272 if (!netdev) { 1273 mlx5_core_warn(dev, 1274 "Failed to create representor netdev for vport %d\n", 1275 rep->vport); 1276 kfree(rpriv); 1277 return -EINVAL; 1278 } 1279 1280 dev_net_set(netdev, mlx5_core_net(dev)); 1281 rpriv->netdev = netdev; 1282 rep->rep_data[REP_ETH].priv = rpriv; 1283 INIT_LIST_HEAD(&rpriv->vport_sqs_list); 1284 1285 if (rep->vport == MLX5_VPORT_UPLINK) { 1286 err = mlx5e_create_mdev_resources(dev); 1287 if (err) 1288 goto err_destroy_netdev; 1289 } 1290 1291 err = mlx5e_attach_netdev(netdev_priv(netdev)); 1292 if (err) { 1293 netdev_warn(netdev, 1294 "Failed to attach representor netdev for vport %d\n", 1295 rep->vport); 1296 goto err_destroy_mdev_resources; 1297 } 1298 1299 err = mlx5e_rep_neigh_init(rpriv); 1300 if (err) { 1301 netdev_warn(netdev, 1302 "Failed to initialized neighbours handling for vport %d\n", 1303 rep->vport); 1304 goto err_detach_netdev; 1305 } 1306 1307 err = register_devlink_port(dev, rpriv); 1308 if (err) { 1309 netdev_warn(netdev, "Failed to register devlink port %d\n", 1310 rep->vport); 1311 goto err_neigh_cleanup; 1312 } 1313 1314 err = register_netdev(netdev); 1315 if (err) { 1316 netdev_warn(netdev, 1317 "Failed to register representor netdev for vport %d\n", 1318 rep->vport); 1319 goto err_devlink_cleanup; 1320 } 1321 1322 if (is_devlink_port_supported(dev, rpriv)) 1323 devlink_port_type_eth_set(&rpriv->dl_port, netdev); 1324 return 0; 1325 1326 err_devlink_cleanup: 1327 unregister_devlink_port(dev, rpriv); 1328 1329 err_neigh_cleanup: 1330 mlx5e_rep_neigh_cleanup(rpriv); 1331 1332 err_detach_netdev: 1333 mlx5e_detach_netdev(netdev_priv(netdev)); 1334 1335 err_destroy_mdev_resources: 1336 if (rep->vport == MLX5_VPORT_UPLINK) 1337 mlx5e_destroy_mdev_resources(dev); 1338 1339 err_destroy_netdev: 1340 mlx5e_destroy_netdev(netdev_priv(netdev)); 1341 kfree(rpriv); 1342 return err; 1343 } 1344 1345 
/* Unload one vport representor: the exact reverse of
 * mlx5e_vport_rep_load().  The order is significant — the devlink port
 * type is cleared and the netdev unregistered before any backing state
 * is torn down, so userspace can no longer reach the device while it is
 * being dismantled.
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	/* Saved up front: priv (and priv->ppriv) is freed by
	 * mlx5e_destroy_netdev() below, but rpriv must outlive it so it
	 * can be kfree'd last.
	 */
	void *ppriv = priv->ppriv;

	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_clear(&rpriv->dl_port);
	unregister_netdev(netdev);
	unregister_devlink_port(dev, rpriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	/* Only the uplink rep owns the shared mdev resources. */
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(priv->mdev);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

/* Return the netdev backing this rep (eswitch rep_ops callback). */
static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

/* Callbacks the eswitch core invokes for each REP_ETH representor. */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};

/* Register the ethernet representor ops with the device's eswitch. */
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}