/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

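/* Offload a flow into the fast path FDB: match on the e-switch source vport
 * (set in the misc parameters below, in addition to the caller's outer
 * header match) and either forward to the destination vport or attach a
 * flow counter, depending on the requested action.
 */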
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				u32 action, u32 src_vport, u32 dst_vport)
{
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_rule *rule;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport_num = dst_vport;
		action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);
		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
				  spec, action, 0, &dest);

	if (IS_ERR(rule))
		mlx5_fc_destroy(esw->dev, counter);

	return rule;
}

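/* Build a send-to-vport rule in the slow path FDB: match packets injected
 * from the PF (source vport 0) on the given send queue number and forward
 * them to the requested vport.
 */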
static struct mlx5_flow_rule *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	/* Send-to-vport rules belong to the slow path FDB table, whose
	 * send-to-vport flow group was created for them.
	 */
	flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				       0, &dest);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

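/* Install one send-to-vport (re-inject) rule per SQ of the representor, so
 * traffic sent on a representor netdev is delivered to the vport that
 * representor stands for (the uplink vport for the PF representor).
 */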
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_rule *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int vport;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	vport = rep->vport == 0 ?
		FDB_UPLINK_VPORT : rep->vport;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

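/* The FDB miss rule: any packet that hits neither an offloaded fast path
 * rule nor a send-to-vport rule is forwarded to vport 0 (the PF), where the
 * representor netdevs receive it.
 */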
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;

	flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				       0, &dest);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS  4

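/* The offloads FDB is split in two: a large auto-grouped fast path table
 * (FDB_FAST_PATH priority) holding the offloaded flows, and a small slow
 * path table (FDB_SLOW_PATH priority) holding the send-to-vport rules and
 * the miss rule, sized for nvports + MAX_PF_SQ send-to-vport entries plus
 * one miss entry.
 */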
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	int table_size, ix, err = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  ESW_OFFLOADS_NUM_ENTRIES,
						  ESW_OFFLOADS_NUM_GROUPS, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create fast path FDB table, err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	table_size = nvports + MAX_PF_SQ + 1;
	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB table, err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}


static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

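/* The offloads table lives in the MLX5_FLOW_NAMESPACE_OFFLOADS (NIC RX)
 * namespace and holds per-vport RX rules that match on the e-switch source
 * vport and forward to a representor TIR.
 */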
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_core_dev *dev = esw->dev;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

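/* Add an RX rule to the offloads table: match on the e-switch source vport
 * of the packet and forward it to the given TIR, so the representor netdev
 * for that vport receives the traffic.
 */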
struct mlx5_flow_rule *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				       0, &dest);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

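/* devlink switch to switchdev (offloads) mode: tear down the legacy
 * e-switch and re-enable it in offloads mode with the same number of VFs.
 */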
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err)
		esw_warn(esw->dev, "Failed setting eswitch to offloads mode, err %d\n", err);
	return err;
}

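/* Set up the offloads mode steering objects (FDB tables, offloads table and
 * vport RX group) and load the registered vport representors.
 */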
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}
	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err)
		esw_warn(esw->dev, "Failed setting eswitch to legacy mode, err %d\n", err);

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}

static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	*mode = dev->priv.eswitch->mode;

	return 0;
}

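/* Register a representor for a vport: copy the caller's rep descriptor
 * (load/unload callbacks and private data) and mark the slot valid so the
 * rep is loaded when offloads mode is initialized.
 */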
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	memcpy(&offloads->vport_reps[rep->vport], rep,
	       sizeof(struct mlx5_eswitch_rep));

	INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
	offloads->vport_reps[rep->vport].valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
		rep->unload(esw, rep);

	offloads->vport_reps[vport].valid = false;
}