1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mlx5/fs.h>
34 #include <net/switchdev.h>
35 #include <net/pkt_cls.h>
36 #include <net/act_api.h>
37 #include <net/devlink.h>
38 #include <net/ipv6_stubs.h>
39 
40 #include "eswitch.h"
41 #include "en.h"
42 #include "en_rep.h"
43 #include "en/params.h"
44 #include "en/txrx.h"
45 #include "en_tc.h"
46 #include "en/rep/tc.h"
47 #include "en/rep/neigh.h"
48 #include "en/rep/bridge.h"
49 #include "en/devlink.h"
50 #include "fs_core.h"
51 #include "lib/mlx5.h"
52 #include "lib/devcom.h"
53 #include "lib/vxlan.h"
54 #define CREATE_TRACE_POINTS
55 #include "diag/en_rep_tracepoint.h"
56 #include "en_accel/ipsec.h"
57 #include "en/tc/int_port.h"
58 
/* Default SQ size for non-uplink representors: small (2^7 = 128 WQEs),
 * since reps mostly carry slow-path traffic; clamped to the driver minimum.
 */
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
/* Representors default to a single channel (RQ/SQ pair). */
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

/* Driver name reported via ethtool -i on representor netdevs. */
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
64 
65 static void mlx5e_rep_get_drvinfo(struct net_device *dev,
66 				  struct ethtool_drvinfo *drvinfo)
67 {
68 	struct mlx5e_priv *priv = netdev_priv(dev);
69 	struct mlx5_core_dev *mdev = priv->mdev;
70 
71 	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
72 		sizeof(drvinfo->driver));
73 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
74 		 "%d.%d.%04d (%.16s)",
75 		 fw_rev_maj(mdev), fw_rev_min(mdev),
76 		 fw_rev_sub(mdev), mdev->board_id);
77 }
78 
/* Software (driver-aggregated) counters exposed for VF/PF representors. */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
85 
/* Naming/offset helper for the HW vport counters below. The descriptors
 * built from it are read against &priv->stats.vf_vport (a struct
 * rtnl_link_stats64) in the vport_rep fill_stats op, so the field order
 * here is assumed to mirror the leading fields of rtnl_link_stats64 —
 * NOTE(review): verify against the UAPI struct before reordering.
 */
struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

/* Hardware eswitch-vport counters exposed for VF/PF representors. */
static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
102 
/* Number of stats in the sw_rep group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}
107 
108 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
109 {
110 	int i;
111 
112 	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
113 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
114 		       sw_rep_stats_desc[i].format);
115 	return idx;
116 }
117 
118 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
119 {
120 	int i;
121 
122 	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
123 		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
124 						   sw_rep_stats_desc, i);
125 	return idx;
126 }
127 
128 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
129 {
130 	struct mlx5e_sw_stats *s = &priv->stats.sw;
131 	struct rtnl_link_stats64 stats64 = {};
132 
133 	memset(s, 0, sizeof(*s));
134 	mlx5e_fold_sw_stats64(priv, &stats64);
135 
136 	s->rx_packets = stats64.rx_packets;
137 	s->rx_bytes   = stats64.rx_bytes;
138 	s->tx_packets = stats64.tx_packets;
139 	s->tx_bytes   = stats64.tx_bytes;
140 	s->tx_queue_dropped = stats64.tx_dropped;
141 }
142 
/* Number of stats in the vport_rep group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}
147 
148 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
149 {
150 	int i;
151 
152 	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
153 		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
154 	return idx;
155 }
156 
157 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
158 {
159 	int i;
160 
161 	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
162 		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
163 						   vport_rep_stats_desc, i);
164 	return idx;
165 }
166 
167 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
168 {
169 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
170 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
171 	struct mlx5_eswitch_rep *rep = rpriv->rep;
172 	struct rtnl_link_stats64 *vport_stats;
173 	struct ifla_vf_stats vf_stats;
174 	int err;
175 
176 	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
177 	if (err) {
178 		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
179 			    rep->vport, err);
180 		return;
181 	}
182 
183 	vport_stats = &priv->stats.vf_vport;
184 	/* flip tx/rx as we are reporting the counters for the switch vport */
185 	vport_stats->rx_packets = vf_stats.tx_packets;
186 	vport_stats->rx_bytes   = vf_stats.tx_bytes;
187 	vport_stats->tx_packets = vf_stats.rx_packets;
188 	vport_stats->tx_bytes   = vf_stats.rx_bytes;
189 }
190 
191 static void mlx5e_rep_get_strings(struct net_device *dev,
192 				  u32 stringset, uint8_t *data)
193 {
194 	struct mlx5e_priv *priv = netdev_priv(dev);
195 
196 	switch (stringset) {
197 	case ETH_SS_STATS:
198 		mlx5e_stats_fill_strings(priv, data);
199 		break;
200 	}
201 }
202 
203 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
204 					struct ethtool_stats *stats, u64 *data)
205 {
206 	struct mlx5e_priv *priv = netdev_priv(dev);
207 
208 	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
209 }
210 
211 static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
212 {
213 	struct mlx5e_priv *priv = netdev_priv(dev);
214 
215 	switch (sset) {
216 	case ETH_SS_STATS:
217 		return mlx5e_stats_total_num(priv);
218 	default:
219 		return -EOPNOTSUPP;
220 	}
221 }
222 
/* ethtool .get_ringparam: kernel_param and extack are unused; the common
 * helper fills only the legacy ringparam struct.
 */
static void
mlx5e_rep_get_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	mlx5e_ethtool_get_ringparam(netdev_priv(dev), param);
}
233 
/* ethtool .set_ringparam: forward to the shared mlx5e implementation. */
static int
mlx5e_rep_set_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_set_ringparam(netdev_priv(dev), param);
}
244 
/* ethtool .get_channels: forward to the shared mlx5e implementation. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	mlx5e_ethtool_get_channels(netdev_priv(dev), ch);
}
252 
/* ethtool .set_channels: forward to the shared mlx5e implementation. */
static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	return mlx5e_ethtool_set_channels(netdev_priv(dev), ch);
}
260 
/* ethtool .get_coalesce: extack is unused by the shared implementation. */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_get_coalesce(netdev_priv(netdev), coal,
					  kernel_coal);
}
270 
/* ethtool .set_coalesce: forward to the shared mlx5e implementation. */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_set_coalesce(netdev_priv(netdev), coal,
					  kernel_coal, extack);
}
280 
281 static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
282 {
283 	struct mlx5e_priv *priv = netdev_priv(netdev);
284 
285 	return mlx5e_ethtool_get_rxfh_key_size(priv);
286 }
287 
288 static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
289 {
290 	struct mlx5e_priv *priv = netdev_priv(netdev);
291 
292 	return mlx5e_ethtool_get_rxfh_indir_size(priv);
293 }
294 
/* ethtool ops for non-uplink representor netdevs. */
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
313 
/* Tear down all SQ->vport re-inject rules installed by
 * mlx5e_sqs2vport_start() and free their bookkeeping entries.
 */
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	/* Re-inject rules exist only in switchdev (offloads) mode. */
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		/* Peer rule exists only when the eswitch was devcom-paired. */
		if (rep_sq->send_to_vport_rule_peer)
			mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
332 
/* Install "send to vport" re-inject rules so that traffic sent on the
 * representor's SQs (@sqns_array, @sqns_num entries) is forwarded by the
 * eswitch to the represented vport. If the eswitch is devcom-paired, a
 * mirror rule is also installed on the peer eswitch.
 *
 * Returns 0 on success; on failure all rules added so far are removed.
 */
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	/* Re-inject rules are only meaningful in switchdev mode. */
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* Holds the peer data until released below (both exit paths). */
	if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
						     MLX5_DEVCOM_ESW_OFFLOADS);

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

		if (peer_esw) {
			/* Mirror rule on the paired eswitch. */
			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
									rep, sqns_array[i]);
			if (IS_ERR(flow_rule)) {
				err = PTR_ERR(flow_rule);
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				kfree(rep_sq);
				goto out_err;
			}
			rep_sq->send_to_vport_rule_peer = flow_rule;
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return 0;

out_err:
	/* Roll back any rules added by earlier iterations. */
	mlx5e_sqs2vport_stop(esw, rep);

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}
398 
/* Collect the SQ numbers of all open channels/TCs and install the
 * eswitch re-inject (send-to-vport) rules for them.
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	/* One entry per (channel, tc) SQ. */
	sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
		      sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}
428 
429 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
430 {
431 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
432 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
433 	struct mlx5_eswitch_rep *rep = rpriv->rep;
434 
435 	mlx5e_sqs2vport_stop(esw, rep);
436 }
437 
/* ndo_open for representors: open the channels under the state lock,
 * then bring the eswitch vport admin state up and reflect success on
 * the netdev carrier.
 */
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	/* Turn the carrier on only if the vport state change succeeded. */
	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
460 
/* ndo_stop for representors: take the vport admin state down first, then
 * close the channels — all under the state lock.
 */
static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	/* Failure to change the vport state is deliberately ignored here. */
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}
477 
478 bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
479 {
480 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
481 	struct mlx5_eswitch_rep *rep;
482 
483 	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
484 		return false;
485 
486 	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
487 		return false;
488 
489 	rep = rpriv->rep;
490 	return (rep->vport == MLX5_VPORT_UPLINK);
491 }
492 
493 bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
494 {
495 	switch (attr_id) {
496 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
497 			return true;
498 	}
499 
500 	return false;
501 }
502 
/* Fold the per-ring software counters into @stats; never fails. */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	mlx5e_fold_sw_stats64(netdev_priv(dev), stats);
	return 0;
}
512 
513 int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
514 				void *sp)
515 {
516 	switch (attr_id) {
517 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
518 		return mlx5e_get_sw_stats64(dev, sp);
519 	}
520 
521 	return -EINVAL;
522 }
523 
524 static void
525 mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
526 {
527 	struct mlx5e_priv *priv = netdev_priv(dev);
528 
529 	/* update HW stats in background for next time */
530 	mlx5e_queue_update_stats(priv);
531 	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
532 }
533 
/* ndo_change_mtu: reuse the common mlx5e MTU path with a NULL callback —
 * presumably skipping any device MTU programming; confirm against
 * mlx5e_change_mtu() before relying on that.
 */
static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
538 
539 static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
540 {
541 	struct mlx5e_priv *priv = netdev_priv(netdev);
542 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
543 	struct mlx5_core_dev *dev = priv->mdev;
544 
545 	return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
546 }
547 
548 static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
549 {
550 	struct mlx5e_priv *priv = netdev_priv(dev);
551 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
552 	struct mlx5_eswitch_rep *rep = rpriv->rep;
553 	int err;
554 
555 	if (new_carrier) {
556 		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
557 						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
558 		if (err)
559 			return err;
560 		netif_carrier_on(dev);
561 	} else {
562 		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
563 						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
564 		if (err)
565 			return err;
566 		netif_carrier_off(dev);
567 	}
568 	return 0;
569 }
570 
/* net_device_ops for non-uplink representor netdevs. */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats	 = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats	 = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
	.ndo_change_carrier      = mlx5e_rep_change_carrier,
};
583 
/* True if @netdev is the uplink representor. Note it is matched against
 * the regular mlx5e_netdev_ops (not mlx5e_netdev_ops_rep) plus the
 * uplink-vport check in mlx5e_is_uplink_rep().
 */
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}
589 
/* True if @netdev is a VF (non-uplink) representor, identified by its
 * dedicated rep net_device_ops.
 */
bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
594 
/* Upper bound on channels per representor so that the TIRs of all vports
 * together stay within the device's log_max_tir capability.
 */
static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
{
	return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) /
		mlx5_eswitch_get_total_vports(mdev);
}
600 
/* Initialize the channel parameters of a representor netdev: a single
 * default channel, a small SQ for non-uplink reps, plus RQ and CQ
 * moderation defaults.
 */
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	/* Prefer CQE-based CQ moderation when the device supports it. */
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ: full-size for the uplink rep, small for VF/PF reps */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->mqprio.num_tc       = 1;
	params->tunneled_offload_en = false;

	/* Set an initial non-zero value, so that mlx5e_select_queue won't
	 * divide by zero if called before first activating channels.
	 */
	priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
642 
643 static void mlx5e_build_rep_netdev(struct net_device *netdev,
644 				   struct mlx5_core_dev *mdev)
645 {
646 	SET_NETDEV_DEV(netdev, mdev->device);
647 	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
648 	eth_hw_addr_random(netdev);
649 	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
650 
651 	netdev->watchdog_timeo    = 15 * HZ;
652 
653 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
654 	netdev->hw_features    |= NETIF_F_HW_TC;
655 #endif
656 	netdev->hw_features    |= NETIF_F_SG;
657 	netdev->hw_features    |= NETIF_F_IP_CSUM;
658 	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
659 	netdev->hw_features    |= NETIF_F_GRO;
660 	netdev->hw_features    |= NETIF_F_TSO;
661 	netdev->hw_features    |= NETIF_F_TSO6;
662 	netdev->hw_features    |= NETIF_F_RXCSUM;
663 
664 	netdev->features |= netdev->hw_features;
665 	netdev->features |= NETIF_F_NETNS_LOCAL;
666 }
667 
/* Profile .init for VF/PF representors: build channel params and start
 * timestamping. Cannot fail.
 */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}
678 
/* Profile .init for the uplink representor: IPsec init is best-effort
 * (failure is logged, not fatal), then VXLAN netdev info and the common
 * rep init.
 */
static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);

	mlx5e_vxlan_set_netdev_info(priv);
	return mlx5e_init_rep(mdev, netdev);
}
692 
/* Profile .cleanup: release IPsec state (no-op if never initialized). */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
}
697 
/* Create the rep's TTC (traffic type classifier) steering table in the
 * kernel flow namespace.
 */
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	mlx5e_set_ttc_params(priv, &ttc_params, false);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* To give uplink rep TTC a lower level for chaining from root ft */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(priv->fs.ttc)) {
		err = PTR_ERR(priv->fs.ttc);
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
			   err);
		return err;
	}
	return 0;
}
724 
/* Establish the rep's root flow table. Non-uplink reps reuse their TTC
 * table directly; the uplink rep gets an empty table in the offloads
 * namespace whose miss path chains to the next table (ethtool/TTC).
 */
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		/* NULL the pointer so the destroy path can't touch it. */
		rpriv->root_ft = NULL;
	}

	return err;
}
760 
761 static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
762 {
763 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
764 	struct mlx5_eswitch_rep *rep = rpriv->rep;
765 
766 	if (rep->vport != MLX5_VPORT_UPLINK)
767 		return;
768 	mlx5_destroy_flow_table(rpriv->root_ft);
769 }
770 
771 static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
772 {
773 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
774 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
775 	struct mlx5_eswitch_rep *rep = rpriv->rep;
776 	struct mlx5_flow_handle *flow_rule;
777 	struct mlx5_flow_destination dest;
778 
779 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
780 	dest.ft = rpriv->root_ft;
781 
782 	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
783 	if (IS_ERR(flow_rule))
784 		return PTR_ERR(flow_rule);
785 	rpriv->vport_rx_rule = flow_rule;
786 	return 0;
787 }
788 
789 static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
790 {
791 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
792 
793 	if (!rpriv->vport_rx_rule)
794 		return;
795 
796 	mlx5_del_flow_rules(rpriv->vport_rx_rule);
797 	rpriv->vport_rx_rule = NULL;
798 }
799 
800 int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
801 {
802 	rep_vport_rx_rule_destroy(priv);
803 
804 	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
805 }
806 
807 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
808 {
809 	struct mlx5_core_dev *mdev = priv->mdev;
810 	int err;
811 
812 	priv->rx_res = mlx5e_rx_res_alloc();
813 	if (!priv->rx_res)
814 		return -ENOMEM;
815 
816 	mlx5e_init_l2_addr(priv);
817 
818 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
819 	if (err) {
820 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
821 		return err;
822 	}
823 
824 	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
825 				priv->max_nch, priv->drop_rq.rqn,
826 				&priv->channels.params.packet_merge,
827 				priv->channels.params.num_channels);
828 	if (err)
829 		goto err_close_drop_rq;
830 
831 	err = mlx5e_create_rep_ttc_table(priv);
832 	if (err)
833 		goto err_destroy_rx_res;
834 
835 	err = mlx5e_create_rep_root_ft(priv);
836 	if (err)
837 		goto err_destroy_ttc_table;
838 
839 	err = mlx5e_create_rep_vport_rx_rule(priv);
840 	if (err)
841 		goto err_destroy_root_ft;
842 
843 	mlx5e_ethtool_init_steering(priv);
844 
845 	return 0;
846 
847 err_destroy_root_ft:
848 	mlx5e_destroy_rep_root_ft(priv);
849 err_destroy_ttc_table:
850 	mlx5_destroy_ttc_table(priv->fs.ttc);
851 err_destroy_rx_res:
852 	mlx5e_rx_res_destroy(priv->rx_res);
853 err_close_drop_rq:
854 	mlx5e_close_drop_rq(&priv->drop_rq);
855 	mlx5e_rx_res_free(priv->rx_res);
856 	priv->rx_res = NULL;
857 	return err;
858 }
859 
/* Profile .cleanup_rx: reverse of mlx5e_init_rep_rx() — tear down
 * steering, flow tables, RX resources and the drop RQ in reverse
 * creation order.
 */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5_destroy_ttc_table(priv->fs.ttc);
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}
871 
/* Uplink-rep .init_rx: also create the queue counters and the internal
 * port (TC) rx state on top of the common rep rx init.
 */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	int err;

	mlx5e_create_q_counters(priv);
	err = mlx5e_init_rep_rx(priv);
	if (err)
		goto err_destroy_q_counters;

	mlx5e_tc_int_port_init_rep_rx(priv);

	return 0;

err_destroy_q_counters:
	/* Bug fix: the q counters used to leak when mlx5e_init_rep_rx()
	 * failed, since the error path returned without destroying them.
	 */
	mlx5e_destroy_q_counters(priv);
	return err;
}
886 
/* Uplink-rep .cleanup_rx: reverse of mlx5e_init_ul_rep_rx(). */
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_int_port_cleanup_rep_rx(priv);
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}
893 
/* Uplink-only TX init: TC infrastructure, tunnel entropy, bond support
 * and the netdevice event notifier. On failure, everything initialized
 * here is unwound.
 */
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}
926 
/* Profile .init_tx: create the TISes for all reps; the uplink rep also
 * gets the uplink-only TX state (TC, bond, notifier).
 */
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto destroy_tises;
	}

	return 0;

destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}
950 
/* Reverse of mlx5e_init_uplink_rep_tx(). */
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}
957 
/* Profile .cleanup_tx: destroy the TISes, then the uplink-only TX state.
 * NOTE(review): this is not the exact reverse of mlx5e_init_rep_tx()
 * (which creates TISes before the uplink TX state) — confirm the
 * ordering is intentional before changing it.
 */
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_destroy_tises(priv);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
}
967 
/* Profile .enable for VF/PF reps: set MTU limits and start the neighbour
 * update machinery.
 */
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_rep_neigh_init(rpriv);
}
975 
976 static void mlx5e_rep_disable(struct mlx5e_priv *priv)
977 {
978 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
979 
980 	mlx5e_rep_neigh_cleanup(rpriv);
981 }
982 
/* Profile .update_rx hook: representors have nothing to refresh. */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
987 
/* Firmware async event handler for the uplink rep: schedule a carrier
 * refresh on port up/down events and forward port-affinity events to the
 * rep TC code.
 */
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			/* Carrier update runs in the driver workqueue. */
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
		return mlx5e_rep_tc_event_port_affinity(priv);

	return NOTIFY_DONE;
}
1012 
/* Profile .enable for the uplink rep: program MTU limits, enable TC
 * offloads, register the FW event notifier, init DCB/neigh/bridge state
 * and (re)open the netdev under rtnl.
 */
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5e_rep_tc_enable(priv);

	/* Let the uplink vport follow the physical link when supported. */
	if (MLX5_CAP_GEN(mdev, uplink_follow))
		mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
					      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
	mlx5_lag_add_netdev(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
	mlx5e_rep_neigh_init(rpriv);
	mlx5e_rep_bridge_init(priv);

	netdev->wanted_features |= NETIF_F_HW_TC;

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}
1047 
/* Profile .disable for the uplink rep: close/detach the netdev under
 * rtnl, then unwind mlx5e_uplink_rep_enable() in reverse order.
 */
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_rep_bridge_cleanup(priv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	mlx5_vxlan_reset_to_default(mdev->vxlan);
}
1067 
/* Stats groups for VF/SF representors; vport_rep also updates on ndo stats. */
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};

/* Number of stats groups exposed by non-uplink representors. */
static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}
1081 
/* The stats groups order is opposite to the update_stats() order calls.
 * The uplink representor exposes the full NIC stats set (plus IPsec when
 * built in), unlike the reduced set used for VF/SF representors above.
 */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
};

/* Number of stats groups exposed by the uplink representor. */
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
1108 
/* Netdev profile for VF/SF representors: single TC, regular RQ groups,
 * reduced stats set.
 */
static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_rep_enable,
	.disable	        = mlx5e_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= 1,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
	.stats_grps		= mlx5e_rep_stats_grps,
	.stats_grps_num		= mlx5e_rep_stats_grps_num,
	.max_nch_limit		= mlx5e_rep_max_nch_limit,
};
1127 
/* Netdev profile for the uplink representor: full TC count, carrier
 * updates and the complete NIC stats set.
 */
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_ul_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_ul_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_ul_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_uplink_rep_enable,
	.disable	        = mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.update_carrier	        = mlx5e_update_carrier,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= MLX5E_MAX_NUM_TC,
	/* XSK is needed so we can replace profile with NIC netdev */
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(XSK),
	.stats_grps		= mlx5e_ul_rep_stats_grps,
	.stats_grps_num		= mlx5e_ul_rep_stats_grps_num,
};
1147 
1148 /* e-Switch vport representors */
/* e-Switch vport representors */

/* Load the uplink representor by switching the existing uplink NIC netdev
 * to the uplink-rep profile (no new netdev is created), then mark its
 * devlink port as ethernet.
 * Returns 0 on success or the profile-change error.
 */
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct devlink_port *dl_port;
	int err;

	/* The uplink rep reuses the NIC netdev rather than creating one. */
	rpriv->netdev = priv->netdev;

	/* NOTE(review): on failure rpriv->netdev stays set; presumably the
	 * caller's kfree(rpriv) path makes that harmless — verify against
	 * mlx5e_vport_rep_load().
	 */
	err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
					  rpriv);
	if (err)
		return err;

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, rpriv->netdev);

	return 0;
}
1170 
/* Unload the uplink representor: clear the devlink port type and switch
 * the netdev back to the regular NIC profile (reverse of
 * mlx5e_vport_uplink_rep_load()).
 */
static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
	struct net_device *netdev = rpriv->netdev;
	struct devlink_port *dl_port;
	struct mlx5_core_dev *dev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	dev = priv->mdev;

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	/* Restore the plain NIC profile on the shared uplink netdev. */
	mlx5e_netdev_attach_nic_profile(priv);
}
1187 
/* Create, initialize, attach and register a netdev for a VF/SF
 * representor, then mark its devlink port as ethernet.
 * Unwinds via the goto ladder in strict reverse order on failure.
 * Returns 0 on success or a negative errno.
 */
static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	profile = &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	/* Wire profile and rep-private data before profile->init() runs. */
	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, netdev);
	return 0;

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}
1250 
1251 static int
1252 mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1253 {
1254 	struct mlx5e_rep_priv *rpriv;
1255 	int err;
1256 
1257 	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
1258 	if (!rpriv)
1259 		return -ENOMEM;
1260 
1261 	/* rpriv->rep to be looked up when profile->init() is called */
1262 	rpriv->rep = rep;
1263 	rep->rep_data[REP_ETH].priv = rpriv;
1264 	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
1265 
1266 	if (rep->vport == MLX5_VPORT_UPLINK)
1267 		err = mlx5e_vport_uplink_rep_load(dev, rep);
1268 	else
1269 		err = mlx5e_vport_vf_rep_load(dev, rep);
1270 
1271 	if (err)
1272 		kfree(rpriv);
1273 
1274 	return err;
1275 }
1276 
/* Unload a representor. The uplink rep only swaps profiles back; VF/SF
 * reps fully unwind: clear devlink port type, unregister, detach, run
 * profile cleanup and destroy the netdev. The rpriv allocated in
 * mlx5e_vport_rep_load() is freed in both paths.
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	struct devlink_port *dl_port;
	/* Saved before teardown: priv may be reused by the NIC profile. */
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kfree(ppriv); /* mlx5e_rep_priv */
}
1302 
1303 static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
1304 {
1305 	struct mlx5e_rep_priv *rpriv;
1306 
1307 	rpriv = mlx5e_rep_to_rep_priv(rep);
1308 
1309 	return rpriv->netdev;
1310 }
1311 
/* On eswitch UNPAIR (or pairing rollback), delete every peer
 * send-to-vport rule installed for this rep's SQs and clear the
 * references so a later pair can re-install them.
 */
static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		if (!rep_sq->send_to_vport_rule_peer)
			continue;
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		rep_sq->send_to_vport_rule_peer = NULL;
	}
}
1325 
/* On eswitch PAIR, install a send-to-vport rule on the peer eswitch for
 * each of this rep's SQs that does not already have one. On any failure,
 * roll back the rules installed so far and return the error.
 */
static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep,
				      struct mlx5_eswitch *peer_esw)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		/* Skip SQs already paired (e.g. from a previous event). */
		if (rep_sq->send_to_vport_rule_peer)
			continue;
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
		if (IS_ERR(flow_rule))
			goto err_out;
		rep_sq->send_to_vport_rule_peer = flow_rule;
	}

	return 0;
err_out:
	mlx5e_vport_rep_event_unpair(rep);
	return PTR_ERR(flow_rule);
}
1349 
1350 static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
1351 				 struct mlx5_eswitch_rep *rep,
1352 				 enum mlx5_switchdev_event event,
1353 				 void *data)
1354 {
1355 	int err = 0;
1356 
1357 	if (event == MLX5_SWITCHDEV_EVENT_PAIR)
1358 		err = mlx5e_vport_rep_event_pair(esw, rep, data);
1359 	else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
1360 		mlx5e_vport_rep_event_unpair(rep);
1361 
1362 	return err;
1363 }
1364 
/* Representor callbacks registered with the eswitch for REP_ETH. */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
	.event = mlx5e_vport_rep_event,
};
1371 
1372 static int mlx5e_rep_probe(struct auxiliary_device *adev,
1373 			   const struct auxiliary_device_id *id)
1374 {
1375 	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
1376 	struct mlx5_core_dev *mdev = edev->mdev;
1377 	struct mlx5_eswitch *esw;
1378 
1379 	esw = mdev->priv.eswitch;
1380 	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
1381 	return 0;
1382 }
1383 
1384 static void mlx5e_rep_remove(struct auxiliary_device *adev)
1385 {
1386 	struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
1387 	struct mlx5_core_dev *mdev = vdev->mdev;
1388 	struct mlx5_eswitch *esw;
1389 
1390 	esw = mdev->priv.eswitch;
1391 	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
1392 }
1393 
/* Auxiliary device IDs this driver binds to ("<driver>.eth-rep"). */
static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);
1400 
/* Auxiliary driver for the ethernet representor sub-function. */
static struct auxiliary_driver mlx5e_rep_driver = {
	.name = "eth-rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};
1407 
/* Register the representor auxiliary driver (module init path).
 * Returns 0 on success or a negative errno.
 */
int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}
1412 
/* Unregister the representor auxiliary driver (module exit path). */
void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}
1417