/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/rep/bridge.h"
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/vxlan.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "diag/reporter_vnic.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
#include "en/ptp.h"
#include "en/fs_ethtool.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strscpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_unicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, rx_vport_rdma_unicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_unicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, tx_vport_rdma_unicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_multicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_multicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_multicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_multicast_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       sw_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_rep_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes   = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes   = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
						   vport_rep_stats_desc, i);
	return idx;
}

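/* Update the per-vport representor counters from the FW query_vport_counter
 * command. Note the tx/rx flip below: the representor mirrors the switch
 * side of the vport, so traffic the vport transmitted is what the
 * representor received, and vice versa.
 */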
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return;

	err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		goto out;
	}

	#define MLX5_GET_CTR(p, x) \
		MLX5_GET64(query_vport_counter_out, p, x)
	/* flip tx/rx as we are reporting the counters for the switch vport */
	rep_stats->vport_rx_packets =
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	rep_stats->vport_tx_packets =
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	rep_stats->vport_rx_bytes =
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	rep_stats->vport_tx_bytes =
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	rep_stats->rx_vport_rdma_unicast_packets =
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets);
	rep_stats->tx_vport_rdma_unicast_packets =
		MLX5_GET_CTR(out, received_ib_unicast.packets);
	rep_stats->rx_vport_rdma_unicast_bytes =
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets);
	rep_stats->tx_vport_rdma_unicast_bytes =
		MLX5_GET_CTR(out, received_ib_unicast.octets);
	rep_stats->rx_vport_rdma_multicast_packets =
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets);
	rep_stats->tx_vport_rdma_multicast_packets =
		MLX5_GET_CTR(out, received_ib_multicast.packets);
	rep_stats->rx_vport_rdma_multicast_bytes =
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets);
	rep_stats->tx_vport_rdma_multicast_bytes =
		MLX5_GET_CTR(out, received_ib_multicast.octets);

out:
	kvfree(out);
}

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, u8 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		mlx5e_stats_fill_strings(priv, data);
		break;
	}
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return mlx5e_stats_total_num(priv);
	default:
		return -EOPNOTSUPP;
	}
}

static void
mlx5e_rep_get_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}

static int
mlx5e_rep_set_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_channels(priv, ch);
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

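/* Tear down every send-to-vport rule installed for this representor's SQs,
 * including the rules added on peer eswitches, which are tracked per SQ in
 * the sq_peer xarray.
 */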
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_sq_peer *sq_peer;
	struct mlx5e_rep_priv *rpriv;
	unsigned long i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		xa_for_each(&rep_sq->sq_peer, i, sq_peer) {
			if (sq_peer->rule)
				mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);

			xa_erase(&rep_sq->sq_peer, i);
			kfree(sq_peer);
		}

		xa_destroy(&rep_sq->sq_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

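/* Add the send-to-vport rule for one SQ on every devcom peer eswitch. Each
 * peer rule is kept in the rep_sq's sq_peer xarray, indexed by the peer's
 * vhca_id, so the pair/unpair handlers can find it again later.
 */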
static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
					   struct mlx5_devcom *devcom,
					   struct mlx5e_rep_sq *rep_sq, int i)
{
	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	int tmp;

	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
					peer_esw, tmp) {
		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
		struct mlx5e_rep_sq_peer *sq_peer;
		int err;

		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
		if (!sq_peer)
			return -ENOMEM;

		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
								rep, rep_sq->sqn);
		if (IS_ERR(flow_rule)) {
			kfree(sq_peer);
			return PTR_ERR(flow_rule);
		}

		sq_peer->rule = flow_rule;
		sq_peer->peer = peer_esw;
		err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
		if (err) {
			kfree(sq_peer);
			mlx5_eswitch_del_send_to_vport_rule(flow_rule);
			return err;
		}
	}

	return 0;
}

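/* Install the "send to vport" re-inject rules that steer traffic coming from
 * the given SQs back to the represented vport, and mirror them on the devcom
 * peers when the peer eswitches are ready.
 */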
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	struct mlx5_devcom *devcom;
	bool devcom_locked = false;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	devcom = esw->dev->priv.devcom;
	rpriv = mlx5e_rep_to_rep_priv(rep);
	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		devcom_locked = true;

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

		xa_init(&rep_sq->sq_peer);
		if (devcom_locked) {
			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
			if (err) {
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				xa_destroy(&rep_sq->sq_peer);
				kfree(rep_sq);
				goto out_err;
			}
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

	if (devcom_locked)
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);

	if (devcom_locked)
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}

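/* Gather the SQ numbers of all active channels (one per TC, plus the XDP SQs
 * on the uplink representor and the PTP SQs when port timestamping is on)
 * and install the corresponding send-to-vport rules.
 */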
static int
mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool is_uplink_rep = mlx5e_is_uplink_rep(priv);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int n, tc, nch, num_sqs = 0;
	struct mlx5e_channel *c;
	int err = -ENOMEM;
	bool ptp_sq;
	u32 *sqs;

	ptp_sq = !!(priv->channels.ptp &&
		    MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
	nch = priv->channels.num + ptp_sq;
	/* +2 slots per channel for the xdpsqs; they don't exist on the ptp
	 * channel, so those slots simply stay unused there and are not
	 * counted by num_sqs.
	 */
	if (is_uplink_rep)
		sqs_per_channel += 2;

	sqs = kvcalloc(nch * sqs_per_channel, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;

		if (is_uplink_rep) {
			if (c->xdp)
				sqs[num_sqs++] = c->rq_xdpsq.sqn;

			sqs[num_sqs++] = c->xdpsq.sqn;
		}
	}
	if (ptp_sq) {
		struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

		for (tc = 0; tc < ptp_ch->num_tc; tc++)
			sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kvfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

static void
mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

static int
mlx5e_rep_add_meta_tunnel_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_group *g;

	g = esw->fdb_table.offloads.send_to_vport_meta_grp;
	if (!g)
		return 0;

	flow_rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, rep->vport);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	rpriv->send_to_vport_meta_rule = flow_rule;

	return 0;
}

static void
mlx5e_rep_del_meta_tunnel_rule(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (rpriv->send_to_vport_meta_rule)
		mlx5_eswitch_del_send_to_vport_meta_rule(rpriv->send_to_vport_meta_rule);
}

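/* Called when the representor's channels go active: install the SQ forward
 * rules and, if the eswitch exposes a send-to-vport meta group, the metadata
 * tunnel rule for this vport. The deactivate path undoes both in reverse.
 */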
void mlx5e_rep_activate_channels(struct mlx5e_priv *priv)
{
	mlx5e_add_sqs_fwd_rules(priv);
	mlx5e_rep_add_meta_tunnel_rule(priv);
}

void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv)
{
	mlx5e_rep_del_meta_tunnel_rule(priv);
	mlx5e_remove_sqs_fwd_rules(priv);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	if (new_carrier) {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
		if (err)
			return err;
		netif_carrier_on(dev);
	} else {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
		if (err)
			return err;
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats	 = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats	 = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
	.ndo_change_carrier      = mlx5e_rep_change_carrier,
};

bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}

bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}

/* One indirect TIR set for outer. Inner not supported in reps. */
#define REP_NUM_INDIR_TIRS MLX5E_NUM_INDIR_TIRS

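/* Per-vport channel limit derived from the TIR budget: of the 2^log_max_tir
 * TIRs the device exposes, the PF's own TIRs and one indirect TIR set per
 * vport are reserved; the remainder is split evenly across vports, one
 * direct TIR per channel. Purely illustrative example with hypothetical
 * capability values: log_max_tir = 12, 4 PF TIRs, 16 vports and
 * REP_NUM_INDIR_TIRS = 9 would give (4096 - 4 - 16 * 9) / 16 = 246
 * channels per vport at most.
 */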
static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
{
	int max_tir_num = 1 << MLX5_CAP_GEN(mdev, log_max_tir);
	int num_vports = mlx5_eswitch_get_total_vports(mdev);

	return (max_tir_num - mlx5e_get_pf_num_tirs(mdev)
		- (num_vports * REP_NUM_INDIR_TIRS)) / num_vports;
}

static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* update XDP supported features */
	mlx5e_set_xdp_feature(netdev);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->mqprio.num_tc       = 1;
	if (rep->vport != MLX5_VPORT_UPLINK)
		params->vlan_strip_disable = true;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev,
				   struct mlx5_core_dev *mdev)
{
	SET_NETDEV_DEV(netdev, mdev->device);
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
	eth_hw_addr_random(netdev);
	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->watchdog_timeo    = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features    |= NETIF_F_HW_TC;
#endif
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;
	netdev->features |= NETIF_F_NETNS_LOCAL;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->fs =
		mlx5e_fs_init(priv->profile, mdev,
			      !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
			      priv->dfs_root);
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}

static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->dfs_root = debugfs_create_dir("nic",
					    mlx5_debugfs_get_dev_root(mdev));

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
				 priv->dfs_root);
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		debugfs_remove_recursive(priv->dfs_root);
		return -ENOMEM;
	}

	mlx5e_vxlan_set_netdev_info(priv);
	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);
	return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_fs_cleanup(priv->fs);
	debugfs_remove_recursive(priv->dfs_root);
	priv->fs = NULL;
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int err;

	mlx5e_fs_set_ns(priv->fs,
			mlx5_get_flow_namespace(priv->mdev,
						MLX5_FLOW_NAMESPACE_KERNEL), false);

	/* The inner_ttc in the ttc params is intentionally not set */
	mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* To give the uplink rep TTC a lower level for chaining from root ft */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	mlx5e_fs_set_ttc(priv->fs, mlx5_create_ttc_table(priv->mdev, &ttc_params), false);
	if (IS_ERR(mlx5e_fs_get_ttc(priv->fs, false))) {
		err = PTR_ERR(mlx5e_fs_get_ttc(priv->fs, false));
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
			   err);
		return err;
	}
	return 0;
}

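/* Set up the representor's root flow table. For the uplink representor this
 * is an empty table in the offloads namespace whose miss path chains into
 * the ethtool/TTC tables; other representors use their TTC table directly.
 */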
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non-uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = mlx5_get_ttc_flow_table(mlx5e_fs_get_ttc(priv->fs, false));
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		rpriv->root_ft = NULL;
	}

	return err;
}

static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return;
	mlx5_destroy_flow_table(rpriv->root_ft);
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!rpriv->vport_rx_rule)
		return;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = NULL;
}

int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
	rep_vport_rx_rule_destroy(priv);

	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	priv->rx_res = mlx5e_rx_res_alloc();
	if (!priv->rx_res) {
		err = -ENOMEM;
		goto err_free_fs;
	}

	mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_rx_res_free;
	}

	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
				priv->max_nch, priv->drop_rq.rqn,
				&priv->channels.params.packet_merge,
				priv->channels.params.num_channels);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_rx_res;

	err = mlx5e_create_rep_root_ft(priv);
	if (err)
		goto err_destroy_ttc_table;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_root_ft;

	mlx5e_ethtool_init_steering(priv->fs);

	return 0;

err_destroy_root_ft:
	mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_rx_res_free:
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
err_free_fs:
	mlx5e_fs_cleanup(priv->fs);
	priv->fs = NULL;
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv->fs);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}

static void mlx5e_rep_mpesw_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *uplink_priv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     mpesw_work);
	struct mlx5e_rep_priv *rpriv =
		container_of(uplink_priv, struct mlx5e_rep_priv,
			     uplink_priv);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	rep_vport_rx_rule_destroy(priv);
	mlx5e_create_rep_vport_rx_rule(priv);
}

static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	mlx5e_create_q_counters(priv);
	err = mlx5e_init_rep_rx(priv);
	if (err)
		goto out;

	mlx5e_tc_int_port_init_rep_rx(priv);

	INIT_WORK(&rpriv->uplink_priv.mpesw_work, mlx5e_rep_mpesw_work);

out:
	return err;
}

static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	cancel_work_sync(&rpriv->uplink_priv.mpesw_work);
	mlx5e_tc_int_port_cleanup_rep_rx(priv);
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}

static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}

static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto err_init_tx;
	}

	err = mlx5e_tc_ht_init(&rpriv->tc_ht);
	if (err)
		goto err_ht_init;

	return 0;

err_ht_init:
	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
err_init_tx:
	mlx5e_destroy_tises(priv);
	return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);

	mlx5e_destroy_tises(priv);
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_rep_neigh_init(rpriv);
}

static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_rep_neigh_cleanup(rpriv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}

static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return NOTIFY_DONE;

	queue_work(priv->wq, &rpriv->uplink_priv.mpesw_work);

	return NOTIFY_OK;
}

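/* Async event notifier for the uplink representor: port up/down schedules a
 * carrier update, while port-affinity and multi-port eswitch events are
 * routed to their TC and MPESW handlers respectively.
 */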
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
		return mlx5e_rep_tc_event_port_affinity(priv);
	else if (event == MLX5_DEV_EVENT_MULTIPORT_ESW)
		return mlx5e_rep_event_mpesw(priv);

	return NOTIFY_DONE;
}

static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_ipsec_init(priv);

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5e_rep_tc_enable(priv);

	if (MLX5_CAP_GEN(mdev, uplink_follow))
		mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
					      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
	mlx5_lag_add_netdev(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
	mlx5e_rep_neigh_init(rpriv);
	mlx5e_rep_bridge_init(priv);

	netdev->wanted_features |= NETIF_F_HW_TC;

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_rep_bridge_cleanup(priv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	mlx5_vxlan_reset_to_default(mdev->vxlan);

	mlx5e_ipsec_cleanup(priv);
}

static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};

static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(ptp),
};

static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}

static int
mlx5e_rep_vnic_reporter_diagnose(struct devlink_health_reporter *reporter,
				 struct devlink_fmsg *fmsg,
				 struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = devlink_health_reporter_priv(reporter);
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	return mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg,
						    rep->vport, true);
}

static const struct devlink_health_reporter_ops mlx5_rep_vnic_reporter_ops = {
	.name = "vnic",
	.diagnose = mlx5e_rep_vnic_reporter_diagnose,
};

static void mlx5e_rep_vnic_reporter_create(struct mlx5e_priv *priv,
					   struct devlink_port *dl_port)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct devlink_health_reporter *reporter;

	reporter = devl_port_health_reporter_create(dl_port,
						    &mlx5_rep_vnic_reporter_ops,
						    0, rpriv);
	if (IS_ERR(reporter)) {
		mlx5_core_err(priv->mdev,
			      "Failed to create representor vnic reporter, err = %ld\n",
			      PTR_ERR(reporter));
		return;
	}

	rpriv->rep_vnic_reporter = reporter;
}

static void mlx5e_rep_vnic_reporter_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!IS_ERR_OR_NULL(rpriv->rep_vnic_reporter))
		devl_health_reporter_destroy(rpriv->rep_vnic_reporter);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_rep_enable,
	.disable	        = mlx5e_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= 1,
	.stats_grps		= mlx5e_rep_stats_grps,
	.stats_grps_num		= mlx5e_rep_stats_grps_num,
	.max_nch_limit		= mlx5e_rep_max_nch_limit,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_ul_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_ul_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_ul_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_uplink_rep_enable,
	.disable	        = mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.update_carrier	        = mlx5e_update_carrier,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= MLX5E_MAX_NUM_TC,
	.stats_grps		= mlx5e_ul_rep_stats_grps,
	.stats_grps_num		= mlx5e_ul_rep_stats_grps_num,
};

/* e-Switch vport representors */
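/* The uplink representor reuses the existing NIC netdev and only swaps in
 * the uplink rep profile; representors of other vports get a newly created
 * and registered netdev of their own.
 */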
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);

	rpriv->netdev = priv->netdev;
	return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
					   rpriv);
}

static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	mlx5e_netdev_attach_nic_profile(priv);
}

static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	profile = &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
						 rpriv->rep->vport);
	if (dl_port) {
		SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
		mlx5e_rep_vnic_reporter_create(priv, dl_port);
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	return 0;

err_detach_netdev:
	mlx5e_rep_vnic_reporter_destroy(priv);
	mlx5e_detach_netdev(netdev_priv(netdev));
err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}

static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	int err;

	rpriv = kvzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	if (rep->vport == MLX5_VPORT_UPLINK)
		err = mlx5e_vport_uplink_rep_load(dev, rep);
	else
		err = mlx5e_vport_vf_rep_load(dev, rep);

	if (err)
		kvfree(rpriv);

	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	unregister_netdev(netdev);
	mlx5e_rep_vnic_reporter_destroy(priv);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kvfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep,
					 struct mlx5_eswitch *peer_esw)
{
	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	WARN_ON_ONCE(!peer_esw);
	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		struct mlx5e_rep_sq_peer *sq_peer = xa_load(&rep_sq->sq_peer, i);

		if (!sq_peer || sq_peer->peer != peer_esw)
			continue;

		mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);
		xa_erase(&rep_sq->sq_peer, i);
		kfree(sq_peer);
	}
}

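/* Pair event handler: for every SQ of this representor, install a
 * send-to-vport rule on the newly paired peer eswitch and remember it in the
 * sq_peer xarray under the peer's vhca_id; unwinds via the unpair path on
 * failure.
 */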
static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep,
				      struct mlx5_eswitch *peer_esw)
{
	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_sq_peer *sq_peer;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		sq_peer = xa_load(&rep_sq->sq_peer, i);

		if (sq_peer && sq_peer->peer)
			continue;

		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep,
								rep_sq->sqn);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			goto err_out;
		}

		if (sq_peer) {
			sq_peer->rule = flow_rule;
			sq_peer->peer = peer_esw;
			continue;
		}
		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
		if (!sq_peer) {
			err = -ENOMEM;
			goto err_sq_alloc;
		}
		err = xa_insert(&rep_sq->sq_peer, i, sq_peer, GFP_KERNEL);
		if (err)
			goto err_xa;
		sq_peer->rule = flow_rule;
		sq_peer->peer = peer_esw;
	}

	return 0;
err_xa:
	kfree(sq_peer);
err_sq_alloc:
	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
err_out:
	mlx5e_vport_rep_event_unpair(rep, peer_esw);
	return err;
}

static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 enum mlx5_switchdev_event event,
				 void *data)
{
	int err = 0;

	if (event == MLX5_SWITCHDEV_EVENT_PAIR)
		err = mlx5e_vport_rep_event_pair(esw, rep, data);
	else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
		mlx5e_vport_rep_event_unpair(rep, data);

	return err;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
	.event = mlx5e_vport_rep_event,
};

static int mlx5e_rep_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = edev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
	return 0;
}

static void mlx5e_rep_remove(struct auxiliary_device *adev)
{
	struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = vdev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}

static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);

static struct auxiliary_driver mlx5e_rep_driver = {
	.name = "eth-rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};

int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}

void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}