/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

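/* Build and install a fast-path FDB rule for an offloaded flow: forward and
 * count destinations are taken from @attr, and the source vport (plus the
 * owning vhca id on merged-eswitch setups) is added to the match.
 */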
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_table *ft = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->mirror_count)
		ft = esw->fdb_table.offloads.fwd_fdb;
	else
		ft = esw->fdb_table.offloads.fast_fdb;

	flow_act.action = attr->action;
	/* if per-flow vlan pop/push is emulated, don't program it into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
		flow_act.vlan.vid = attr->vlan_vid;
		flow_act.vlan.prio = attr->vlan_prio;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		for (j = attr->mirror_count; j < attr->out_count; j++) {
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
			dest[i].vport.num = attr->out_rep[j]->vport;
			dest[i].vport.vhca_id =
				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
			dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
			i++;
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

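/* Install the first-level rule of a mirrored flow: forward to the mirror
 * vports and then chain to the fwd_fdb table, where the remaining
 * destinations of the flow are applied.
 */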
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->mirror_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->out_rep[i]->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = esw->fdb_table.offloads.fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);

	if (!IS_ERR(rule))
		esw->offloads.num_flows++;

	return rule;
}

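/* Tear down a rule created by mlx5_eswitch_add_offloaded_rule(), releasing
 * its flow counter (if any) and updating the offloaded flows accounting.
 */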
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

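/* Emulate per-flow vlan push/pop on devices without native support: push is
 * mapped to vlan insertion on the source vport and pop to the global vlan
 * strip policy, with refcounts tracking the users of each.
 */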
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're in vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

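/* Undo the emulated vlan push/pop bookkeeping done in
 * mlx5_eswitch_add_vlan_action(), dropping the per-vport and global
 * refcounts and reverting the vport/global vlan configuration once the
 * last user goes away.
 */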
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're in vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

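/* Slow-path rule used by representors: traffic sent by the PF on @sqn is
 * matched by source sqn/port and forwarded directly to the given vport.
 */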
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

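/* Install the FDB miss rules: traffic that matches nothing else in the
 * slow-path table is forwarded to vport 0, one rule covering unicast and
 * one covering multicast destination MACs.
 */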
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

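/* Create the fast-path FDB table(s). The table size is bounded by both the
 * device's max flow-table size and the number of available flow counters;
 * when the device supports a forwarding FDB, the size is split between the
 * fast_fdb and fwd_fdb tables.
 */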
static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out_namespace;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (mlx5_esw_has_fwd_fdb(dev))
		esw_size >>= 1;

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out_namespace;
	}
	esw->fdb_table.offloads.fast_fdb = fdb;

	if (!mlx5_esw_has_fwd_fdb(dev))
		goto out_namespace;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 1,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create fwd table err %d\n", err);
		goto out_ft;
	}
	esw->fdb_table.offloads.fwd_fdb = fdb;

	return err;

out_ft:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
out_namespace:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_has_fwd_fdb(esw->dev))
		mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

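/* Create the offloads FDB tables: the auto-grouped fast-path table(s) plus
 * the slow-path table holding the send-to-vport group (sized for the PF and
 * VF SQs) and the miss group.
 */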
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	esw_destroy_offloads_fast_fdb_table(esw);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.fast_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

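/* Steer traffic received from @vport into the representor's TIR: match on
 * the source vport in the offloads table and forward to @tirn.
 */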
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

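/* Switch the eswitch from legacy to offloads (switchdev) mode by re-enabling
 * SRIOV in offloads mode; on failure, fall back to legacy mode.
 */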
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}


void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

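/* devlink callback for setting the eswitch encapsulation mode; changing it
 * requires re-creating the fast-path FDB table, so it is refused while
 * offloaded flows exist.
 */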
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load   = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);