1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
#include <linux/mlx5/fs.h>
#include <linux/string.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>
39 
40 #include "eswitch.h"
41 #include "en.h"
42 #include "en_rep.h"
43 #include "en/params.h"
44 #include "en/txrx.h"
45 #include "en_tc.h"
46 #include "en/rep/tc.h"
47 #include "en/rep/neigh.h"
48 #include "en/rep/bridge.h"
49 #include "en/devlink.h"
50 #include "fs_core.h"
51 #include "lib/mlx5.h"
52 #define CREATE_TRACE_POINTS
53 #include "diag/en_rep_tracepoint.h"
54 #include "en_accel/ipsec.h"
55 
56 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
57 	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
58 #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
59 
60 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
61 
62 static void mlx5e_rep_get_drvinfo(struct net_device *dev,
63 				  struct ethtool_drvinfo *drvinfo)
64 {
65 	struct mlx5e_priv *priv = netdev_priv(dev);
66 	struct mlx5_core_dev *mdev = priv->mdev;
67 
68 	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
69 		sizeof(drvinfo->driver));
70 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
71 		 "%d.%d.%04d (%.16s)",
72 		 fw_rev_maj(mdev), fw_rev_min(mdev),
73 		 fw_rev_sub(mdev), mdev->board_id);
74 }
75 
/* Software counters exposed for a vport rep: basic rx/tx packet/byte
 * totals folded from the per-channel SW stats.
 */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
82 
/* Layout used only to name the HW vport counters for ethtool; values are
 * read from priv->stats.vf_vport (struct rtnl_link_stats64) at matching
 * offsets.
 */
struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};
89 
/* HW (e-switch vport) counters exposed for a vport rep. */
static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};
96 
97 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
98 #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
99 
/* Number of SW counters in the sw_rep stats group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}
104 
/* Copy the SW counter names into the ethtool strings buffer (implicit
 * params: data, idx); returns the updated string index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       sw_rep_stats_desc[i].format);
	return idx;
}
114 
/* Copy the SW counter values from priv->stats.sw into the ethtool data
 * array; returns the updated value index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_rep_stats_desc, i);
	return idx;
}
124 
/* Refresh priv->stats.sw by folding the per-channel SW stats into a
 * rtnl_link_stats64 snapshot and copying the fields the rep exposes.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes   = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes   = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}
139 
/* Number of HW counters in the vport_rep stats group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}
144 
/* Copy the HW vport counter names into the ethtool strings buffer;
 * returns the updated string index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
	return idx;
}
153 
/* Copy the HW vport counter values from priv->stats.vf_vport into the
 * ethtool data array; returns the updated value index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
						   vport_rep_stats_desc, i);
	return idx;
}
163 
/* Query the e-switch for this rep's vport counters and cache them in
 * priv->stats.vf_vport. On query failure the previous snapshot is kept.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}
187 
188 static void mlx5e_rep_get_strings(struct net_device *dev,
189 				  u32 stringset, uint8_t *data)
190 {
191 	struct mlx5e_priv *priv = netdev_priv(dev);
192 
193 	switch (stringset) {
194 	case ETH_SS_STATS:
195 		mlx5e_stats_fill_strings(priv, data);
196 		break;
197 	}
198 }
199 
200 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
201 					struct ethtool_stats *stats, u64 *data)
202 {
203 	struct mlx5e_priv *priv = netdev_priv(dev);
204 
205 	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
206 }
207 
208 static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
209 {
210 	struct mlx5e_priv *priv = netdev_priv(dev);
211 
212 	switch (sset) {
213 	case ETH_SS_STATS:
214 		return mlx5e_stats_total_num(priv);
215 	default:
216 		return -EOPNOTSUPP;
217 	}
218 }
219 
/* ethtool .get_ringparam: forward to the common mlx5e handler. */
static void mlx5e_rep_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *param)
{
	mlx5e_ethtool_get_ringparam(netdev_priv(dev), param);
}
227 
/* ethtool .set_ringparam: forward to the common mlx5e handler. */
static int mlx5e_rep_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *param)
{
	return mlx5e_ethtool_set_ringparam(netdev_priv(dev), param);
}
235 
/* ethtool .get_channels: forward to the common mlx5e handler. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	mlx5e_ethtool_get_channels(netdev_priv(dev), ch);
}
243 
/* ethtool .set_channels: forward to the common mlx5e handler. */
static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	return mlx5e_ethtool_set_channels(netdev_priv(dev), ch);
}
251 
/* ethtool .get_coalesce: forward to the common mlx5e handler. */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	return mlx5e_ethtool_get_coalesce(netdev_priv(netdev), coal);
}
259 
/* ethtool .set_coalesce: forward to the common mlx5e handler. */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	return mlx5e_ethtool_set_coalesce(netdev_priv(netdev), coal);
}
267 
268 static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
269 {
270 	struct mlx5e_priv *priv = netdev_priv(netdev);
271 
272 	return mlx5e_ethtool_get_rxfh_key_size(priv);
273 }
274 
275 static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
276 {
277 	struct mlx5e_priv *priv = netdev_priv(netdev);
278 
279 	return mlx5e_ethtool_get_rxfh_indir_size(priv);
280 }
281 
/* ethtool ops for vport representor netdevs. */
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
300 
/* Tear down all SQ->vport re-inject rules for @rep and free the tracking
 * entries. No-op unless the e-switch is in offloads mode.
 */
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* _safe variant: entries are freed while walking the list */
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
317 
/* Install a send-to-vport re-inject rule for each SQ in @sqns_array and
 * track it on rpriv->vport_sqs_list. On any failure all rules installed
 * so far are rolled back via mlx5e_sqs2vport_stop(). Returns 0 or -errno.
 */
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}
356 
/* Collect the SQ numbers of all open channels/TCs and install the
 * corresponding send-to-vport forwarding rules for this rep.
 * Returns 0 or -errno (a warning is logged on failure).
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	/* kcalloc checks the num*size multiplication for overflow */
	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}
385 
/* Remove all SQ forwarding rules previously added by
 * mlx5e_add_sqs_fwd_rules().
 */
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}
394 
/* ndo_open for a rep netdev: open the channels, then set the e-switch
 * vport admin state UP; carrier goes on only if that succeeds. Open
 * failure is returned; a vport state failure is silently tolerated
 * (carrier simply stays off).
 */
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
417 
/* ndo_stop for a rep netdev: bring the e-switch vport admin state DOWN
 * (best effort), then close the channels.
 */
static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}
434 
435 bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
436 {
437 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
438 	struct mlx5_eswitch_rep *rep;
439 
440 	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
441 		return false;
442 
443 	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
444 		return false;
445 
446 	rep = rpriv->rep;
447 	return (rep->vport == MLX5_VPORT_UPLINK);
448 }
449 
450 bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
451 {
452 	switch (attr_id) {
453 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
454 			return true;
455 	}
456 
457 	return false;
458 }
459 
/* Fold the per-channel SW stats of @dev into @stats. Always returns 0. */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}
469 
470 int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
471 				void *sp)
472 {
473 	switch (attr_id) {
474 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
475 		return mlx5e_get_sw_stats64(dev, sp);
476 	}
477 
478 	return -EINVAL;
479 }
480 
/* ndo_get_stats64: return the last cached vport counters and kick an
 * async refresh so a future call sees fresher values.
 */
static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
490 
/* ndo_change_mtu: common MTU change path, no extra preactivate hook. */
static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
495 
/* ndo_get_devlink_port: look up the devlink port registered for this
 * rep's vport in the e-switch offloads layer.
 */
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *dev = priv->mdev;

	return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
}
504 
505 static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
506 {
507 	struct mlx5e_priv *priv = netdev_priv(dev);
508 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
509 	struct mlx5_eswitch_rep *rep = rpriv->rep;
510 	int err;
511 
512 	if (new_carrier) {
513 		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
514 						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
515 		if (err)
516 			return err;
517 		netif_carrier_on(dev);
518 	} else {
519 		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
520 						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
521 		if (err)
522 			return err;
523 		netif_carrier_off(dev);
524 	}
525 	return 0;
526 }
527 
/* netdev ops for (non-uplink) vport representor netdevs; also used by
 * mlx5e_eswitch_vf_rep() to identify such netdevs by ops pointer.
 */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats	 = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats	 = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
	.ndo_change_carrier      = mlx5e_rep_change_carrier,
};
540 
/* True iff @netdev is the uplink representor. The uplink rep uses the
 * regular NIC netdev ops (mlx5e_netdev_ops), so identify it by ops
 * pointer plus the uplink-vport check.
 */
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}
546 
/* True iff @netdev is a VF (non-uplink) representor, identified by its
 * rep-specific netdev ops.
 */
bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
551 
/* Initialize the channel parameters for a rep netdev: single channel,
 * single TC, no tunnel offload; the uplink rep gets a full-size SQ while
 * other reps use a smaller one.
 */
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	/* Prefer CQE-based moderation start when the HW supports it */
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc                = 1;
	params->tunneled_offload_en = false;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
592 
/* Set up the rep netdev: ops, random MAC, ethtool ops and feature flags.
 * Reps are VLAN-challenged and pinned to their netns.
 */
static void mlx5e_build_rep_netdev(struct net_device *netdev,
				   struct mlx5_core_dev *mdev)
{
	SET_NETDEV_DEV(netdev, mdev->device);
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
	eth_hw_addr_random(netdev);
	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->watchdog_timeo    = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features    |= NETIF_F_HW_TC;
#endif
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;
	netdev->features |= NETIF_F_VLAN_CHALLENGED;
	netdev->features |= NETIF_F_NETNS_LOCAL;
}
618 
/* Profile .init for vport reps: build params and init timestamping.
 * Always succeeds (returns 0).
 */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}
629 
/* Profile .init for the uplink rep: best-effort IPsec init (failure is
 * only logged, not fatal), VXLAN netdev info, then the common rep init.
 */
static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);

	mlx5e_vxlan_set_netdev_info(priv);
	return mlx5e_init_rep(mdev, netdev);
}
643 
/* Profile .cleanup: release IPsec state (no-op for non-uplink reps). */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
}
648 
/* Create the rep's traffic-type classification (TTC) flow table steering
 * into the indirect/direct TIRs. Returns 0 or -errno.
 */
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
	mlx5e_set_ttc_ft_params(&ttc_params);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* Bump non-uplink rep TTC one level so the uplink rep TTC
		 * keeps the lower level needed for chaining from root ft
		 */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
		return err;
	}
	return 0;
}
677 
/* Set up rpriv->root_ft, the first table RX traffic for this rep hits.
 * Non-uplink reps reuse their TTC table; the uplink rep gets an empty
 * table whose miss rule chains into ethtool/TTC tables. Returns 0 or
 * -errno.
 */
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = priv->fs.ttc.ft.t;
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		/* NULL the pointer so destroy paths don't touch the error value */
		rpriv->root_ft = NULL;
	}

	return err;
}
713 
/* Destroy the uplink rep's root table. Non-uplink reps alias root_ft to
 * their TTC table, which is destroyed separately, so skip them.
 */
static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return;
	mlx5_destroy_flow_table(rpriv->root_ft);
}
723 
/* Install the e-switch RX rule steering this vport's traffic into the
 * rep's root flow table; the handle is kept in rpriv->vport_rx_rule.
 * Returns 0 or -errno.
 */
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}
741 
/* Delete the vport RX steering rule if present; safe to call twice
 * since the handle is NULLed after deletion.
 */
static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!rpriv->vport_rx_rule)
		return;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = NULL;
}
752 
/* Re-create (or, when @cleanup, just remove) the vport RX rule — used
 * when rep bonding changes which vport owns the rule.
 */
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
	rep_vport_rx_rule_destroy(priv);

	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}
759 
/* Profile .init_rx: build the whole rep RX pipeline in order — drop RQ,
 * indirect/direct RQTs, indirect/direct TIRs, TTC table, root ft, vport
 * RX rule, ethtool steering. On failure everything created so far is
 * unwound in reverse via the goto chain. Returns 0 or -errno.
 */
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_nch = priv->max_nch;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, false);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_direct_tirs;

	err = mlx5e_create_rep_root_ft(priv);
	if (err)
		goto err_destroy_ttc_table;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_root_ft;

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_root_ft:
	mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
	return err;
}
822 
/* Profile .cleanup_rx: tear the RX pipeline down in exact reverse order
 * of mlx5e_init_rep_rx().
 */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	u16 max_nch = priv->max_nch;

	mlx5e_ethtool_cleanup_steering(priv);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
}
837 
/* Profile .init_rx for the uplink rep: add the queue counters on top of
 * the common rep RX init. Returns 0 or -errno.
 */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	int err;

	mlx5e_create_q_counters(priv);
	err = mlx5e_init_rep_rx(priv);
	if (err)
		/* Don't leak the q counters when the common RX init fails;
		 * the cleanup callback won't run after a failed init.
		 */
		mlx5e_destroy_q_counters(priv);

	return err;
}
843 
/* Profile .cleanup_rx for the uplink rep: common RX cleanup first, then
 * drop the queue counters (reverse of mlx5e_init_ul_rep_rx()).
 */
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}
849 
/* Uplink-only TX init: TC offload state, tunnel entropy, rep bonding and
 * the netdev event notifier. On notifier failure the earlier steps are
 * rolled back. Returns 0 or -errno.
 */
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}
882 
/* Profile .init_tx: create the TISes; the uplink rep additionally runs
 * its TC/bond/notifier init, with the TISes destroyed if that fails.
 * Returns 0 or -errno.
 */
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto destroy_tises;
	}

	return 0;

destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}
906 
/* Undo mlx5e_init_uplink_rep_tx() in reverse order. */
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}
913 
/* Profile .cleanup_tx: drop the TISes, then the uplink-only TX state. */
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_destroy_tises(priv);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
}
923 
/* Profile .enable for non-uplink reps: MTU limits and neighbour update
 * machinery.
 */
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_rep_neigh_init(rpriv);
}
931 
/* Profile .disable for non-uplink reps: stop the neighbour machinery. */
static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_rep_neigh_cleanup(rpriv);
}
938 
/* Profile .update_rx: reps need no RX reconfiguration; always succeed. */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
943 
/* Async event notifier for the uplink rep: schedule a carrier update on
 * port up/down, forward port-affinity events to the TC layer, ignore
 * everything else.
 */
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
		return mlx5e_rep_tc_event_port_affinity(priv);

	return NOTIFY_DONE;
}
968 
/* Profile .enable for the uplink rep: MTU limits, TC offload, vport
 * auto-follow, LAG, async events, DCB, neighbours and bridge offload;
 * finally re-attach and (if it was running) reopen the netdev under RTNL.
 */
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5e_rep_tc_enable(priv);

	/* Let the uplink vport admin state follow the physical link */
	if (MLX5_CAP_GEN(mdev, uplink_follow))
		mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
					      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
	mlx5_lag_add_netdev(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
	mlx5e_rep_neigh_init(rpriv);
	mlx5e_rep_bridge_init(priv);

	netdev->wanted_features |= NETIF_F_HW_TC;

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}
1002 
/* Profile .disable for the uplink rep: detach/close the netdev under
 * RTNL, then undo mlx5e_uplink_rep_enable() in reverse order.
 */
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_rep_bridge_cleanup(priv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
}
1021 
/* vport_rep carries MLX5E_NDO_UPDATE_STATS so its counters refresh on
 * ndo_get_stats64; sw_rep has no extra flags.
 */
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};
1030 
/* Number of stats groups exposed by non-uplink reps. */
static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}
1035 
/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
};
1053 
/* Number of stats groups exposed by the uplink representor (profile callback). */
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
1058 
/* Profile for non-uplink (VF/SF) vport representor netdevs: single TC,
 * regular RQ groups only, and the reduced rep stats set.
 */
static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_rep_enable,
	.disable	        = mlx5e_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= 1,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
	.stats_grps		= mlx5e_rep_stats_grps,
	.stats_grps_num		= mlx5e_rep_stats_grps_num,
	.rx_ptp_support		= false,
};
1077 
/* Profile applied to the existing NIC netdev when it becomes the uplink
 * representor (see mlx5e_vport_uplink_rep_load()). Keeps full TC count,
 * carrier updates and the complete stats set of the physical port.
 */
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_ul_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_ul_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_ul_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_uplink_rep_enable,
	.disable	        = mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.update_carrier	        = mlx5e_update_carrier,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= MLX5E_MAX_NUM_TC,
	/* XSK is needed so we can replace profile with NIC netdev */
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(XSK),
	.stats_grps		= mlx5e_ul_rep_stats_grps,
	.stats_grps_num		= mlx5e_ul_rep_stats_grps_num,
	.rx_ptp_support		= false,
};
1098 
/* e-Switch vport representors */

/* Load the uplink representor: instead of creating a new netdev, swap the
 * existing NIC netdev's profile to the uplink-rep profile, then mark the
 * corresponding devlink port as an ethernet port.
 *
 * Returns 0 on success or the error from the profile change; on failure the
 * caller (mlx5e_vport_rep_load()) frees rpriv.
 */
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct devlink_port *dl_port;
	int err;

	/* The uplink rep reuses the NIC's netdev rather than owning one. */
	rpriv->netdev = priv->netdev;

	err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
					  rpriv);
	if (err)
		return err;

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, rpriv->netdev);

	return 0;
}
1121 
/* Unload the uplink representor: clear the devlink port type and restore the
 * netdev to the plain NIC profile (reverse of mlx5e_vport_uplink_rep_load()).
 * The netdev itself stays alive — it belongs to the NIC, not to the rep.
 */
static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
	struct net_device *netdev = rpriv->netdev;
	struct devlink_port *dl_port;
	struct mlx5_core_dev *dev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	dev = priv->mdev;

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	mlx5e_netdev_attach_nic_profile(priv);
}
1138 
/* Load a non-uplink (VF/SF) vport representor: create a dedicated netdev
 * with the rep profile, initialize and attach it, register it, and bind it
 * to its devlink port.
 *
 * Error handling uses goto-based unwind; each label undoes exactly the steps
 * completed before the failing one, in reverse order.
 *
 * Returns 0 on success or a negative errno; on failure the caller
 * (mlx5e_vport_rep_load()) frees rpriv.
 */
static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	unsigned int txqs, rxqs;
	int nch, err;

	/* Queue counts are sized from the profile: one TXQ per TC per channel,
	 * one RXQ per RQ group per channel.
	 */
	profile = &mlx5e_rep_profile;
	nch = mlx5e_get_max_num_channels(dev);
	txqs = nch * profile->max_tc;
	rxqs = nch * profile->rq_groups;
	netdev = mlx5e_create_netdev(dev, txqs, rxqs);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	/* Wire profile and rep-private data before profile->init() runs,
	 * since init looks them up (see comment in mlx5e_vport_rep_load()).
	 */
	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, netdev);
	return 0;

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}
1205 
1206 static int
1207 mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1208 {
1209 	struct mlx5e_rep_priv *rpriv;
1210 	int err;
1211 
1212 	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
1213 	if (!rpriv)
1214 		return -ENOMEM;
1215 
1216 	/* rpriv->rep to be looked up when profile->init() is called */
1217 	rpriv->rep = rep;
1218 	rep->rep_data[REP_ETH].priv = rpriv;
1219 	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
1220 
1221 	if (rep->vport == MLX5_VPORT_UPLINK)
1222 		err = mlx5e_vport_uplink_rep_load(dev, rep);
1223 	else
1224 		err = mlx5e_vport_vf_rep_load(dev, rep);
1225 
1226 	if (err)
1227 		kfree(rpriv);
1228 
1229 	return err;
1230 }
1231 
/* e-switch rep_ops unload callback: reverse of mlx5e_vport_rep_load().
 * Uplink reps only detach from the rep profile (the netdev survives);
 * VF/SF reps unregister and destroy their dedicated netdev. In both cases
 * the rep-private state is freed last.
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	struct devlink_port *dl_port;
	/* Snapshot ppriv now: priv may be repurposed by the profile change in
	 * the uplink path before we reach free_ppriv.
	 */
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kfree(ppriv); /* mlx5e_rep_priv */
}
1257 
1258 static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
1259 {
1260 	struct mlx5e_rep_priv *rpriv;
1261 
1262 	rpriv = mlx5e_rep_to_rep_priv(rep);
1263 
1264 	return rpriv->netdev;
1265 }
1266 
/* Callbacks registered with the e-switch for REP_ETH representors. */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};
1272 
1273 static int mlx5e_rep_probe(struct auxiliary_device *adev,
1274 			   const struct auxiliary_device_id *id)
1275 {
1276 	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
1277 	struct mlx5_core_dev *mdev = edev->mdev;
1278 	struct mlx5_eswitch *esw;
1279 
1280 	esw = mdev->priv.eswitch;
1281 	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
1282 	return 0;
1283 }
1284 
1285 static void mlx5e_rep_remove(struct auxiliary_device *adev)
1286 {
1287 	struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
1288 	struct mlx5_core_dev *mdev = vdev->mdev;
1289 	struct mlx5_eswitch *esw;
1290 
1291 	esw = mdev->priv.eswitch;
1292 	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
1293 }
1294 
/* Match table: binds this driver to the mlx5 core's "eth-rep" auxiliary
 * device, created when switchdev mode is enabled.
 */
static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);

/* Auxiliary driver implementing ethernet representor support. */
static struct auxiliary_driver mlx5e_rep_driver = {
	.name = "eth-rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};
1308 
/* Register the eth-rep auxiliary driver. Returns 0 or a negative errno. */
int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}
1313 
/* Unregister the eth-rep auxiliary driver (undoes mlx5e_rep_init()). */
void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}
1318