// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "ofld.h"

static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
{
	if (!vport->egress.offloads.fwd_rule)
		return;

	mlx5_del_flow_rules(vport->egress.offloads.fwd_rule);
	vport->egress.offloads.fwd_rule = NULL;
}

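/* Remove a single egress bounce rule, identified by its index in the
 * vport's bounce_rules xarray. No-op if no rule is stored at that index.
 */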
void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index)
{
	struct mlx5_flow_handle *bounce_rule =
		xa_load(&vport->egress.offloads.bounce_rules, rule_index);

	if (!bounce_rule)
		return;

	mlx5_del_flow_rules(bounce_rule);
	xa_erase(&vport->egress.offloads.bounce_rules, rule_index);
}

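/* Remove all remaining egress bounce rules of the vport and empty the
 * bounce_rules xarray.
 */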
static void esw_acl_egress_ofld_bounce_rules_destroy(struct mlx5_vport *vport)
{
	struct mlx5_flow_handle *bounce_rule;
	unsigned long i;

	xa_for_each(&vport->egress.offloads.bounce_rules, i, bounce_rule) {
		mlx5_del_flow_rules(bounce_rule);
		xa_erase(&vport->egress.offloads.bounce_rules, i);
	}
}

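/* Add a catch-all egress rule that forwards all traffic of this vport to
 * fwd_dest (another vport). Any previously installed fwd2vport rule is
 * replaced.
 */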
static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport,
						struct mlx5_flow_destination *fwd_dest)
{
	struct mlx5_flow_act flow_act = {};
	int err = 0;

	esw_debug(esw->dev, "vport(%d) configure egress acl rule fwd2vport(%d)\n",
		  vport->vport, fwd_dest->vport.num);

	/* Delete the old egress forward-to-vport rule if any */
	esw_acl_egress_ofld_fwd2vport_destroy(vport);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	vport->egress.offloads.fwd_rule =
		mlx5_add_flow_rules(vport->egress.acl, NULL,
				    &flow_act, fwd_dest, 1);
	if (IS_ERR(vport->egress.offloads.fwd_rule)) {
		err = PTR_ERR(vport->egress.offloads.fwd_rule);
		esw_warn(esw->dev,
			 "vport(%d) failed to add fwd2vport acl rule err(%d)\n",
			 vport->vport, err);
		vport->egress.offloads.fwd_rule = NULL;
	}

	return err;
}

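/* (Re)create the egress ACL rules of the vport:
 * - if prio_tag_required, a VLAN pop rule for prio-tagged packets
 * - if fwd_dest is set, a catch-all fwd2vport rule towards fwd_dest
 */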
static int esw_acl_egress_ofld_rules_create(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport,
					    struct mlx5_flow_destination *fwd_dest)
{
	int err = 0;
	int action;

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
		/* For prio tag mode, there is only 1 FTE:
		 * 1) prio tag packets - pop the prio tag VLAN, allow
		 * Unmatched traffic is allowed by default
		 */
		esw_debug(esw->dev,
			  "vport[%d] configure prio tag egress rules\n", vport->vport);

		action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		action |= fwd_dest ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;

		/* prio tag vlan rule - pop it so vport receives untagged packets */
		err = esw_egress_acl_vlan_create(esw, vport, fwd_dest, 0, action);
		if (err)
			goto prio_err;
	}

	if (fwd_dest) {
		err = esw_acl_egress_ofld_fwd2vport_create(esw, vport, fwd_dest);
		if (err)
			goto fwd_err;
	}

	return 0;

fwd_err:
	esw_acl_egress_vlan_destroy(vport);
prio_err:
	return err;
}

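/* Remove all egress ACL rules of the vport: VLAN (prio tag), fwd2vport
 * and bounce rules.
 */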
static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
{
	esw_acl_egress_vlan_destroy(vport);
	esw_acl_egress_ofld_fwd2vport_destroy(vport);
	esw_acl_egress_ofld_bounce_rules_destroy(vport);
}

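/* Create the egress ACL flow groups:
 * - flow index 0: VLAN group (only when prio_tag_required)
 * - next index:   fwd2vport group (only when egress forwarding is supported)
 */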
static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fwd_grp;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
		ret = esw_acl_egress_vlan_grp_create(esw, vport);
		if (ret)
			return ret;

		flow_index++;
	}

	if (!mlx5_esw_acl_egress_fwd2vport_supported(esw))
		goto out;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		ret = -ENOMEM;
		goto fwd_grp_err;
	}

	/* This group holds 1 FTE to forward all packets to the other vport
	 * when vport bonding is supported.
	 */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
	fwd_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
	if (IS_ERR(fwd_grp)) {
		ret = PTR_ERR(fwd_grp);
		esw_warn(esw->dev,
			 "Failed to create vport[%d] egress fwd2vport flow group, err(%d)\n",
			 vport->vport, ret);
		kvfree(flow_group_in);
		goto fwd_grp_err;
	}
	vport->egress.offloads.fwd_grp = fwd_grp;
	kvfree(flow_group_in);
	return 0;

fwd_grp_err:
	esw_acl_egress_vlan_grp_destroy(vport);
out:
	return ret;
}

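/* Destroy the fwd2vport, bounce and VLAN egress ACL flow groups, if they
 * were created.
 */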
static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.offloads.fwd_grp)) {
		mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp);
		vport->egress.offloads.fwd_grp = NULL;
	}

	if (!IS_ERR_OR_NULL(vport->egress.offloads.bounce_grp)) {
		mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
		vport->egress.offloads.bounce_grp = NULL;
	}

	esw_acl_egress_vlan_grp_destroy(vport);
}

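/* Egress ACL offloads are set up only for VF and SF vports. */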
static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
}

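/* Create the egress ACL table, groups and default rules for a VF/SF vport.
 * The table holds at most two FTEs: an optional prio tag VLAN pop rule and
 * an optional fwd2vport rule. Nothing is done when neither egress
 * forwarding nor prio tag is supported/required.
 */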
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int table_size = 0;
	int err;

	if (!mlx5_esw_acl_egress_fwd2vport_supported(esw) &&
	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	if (!esw_acl_egress_needed(esw, vport->vport))
		return 0;

	esw_acl_egress_ofld_rules_destroy(vport);

	if (mlx5_esw_acl_egress_fwd2vport_supported(esw))
		table_size++;
	if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
		table_size++;
	vport->egress.acl = esw_acl_table_create(esw, vport,
						 MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
	if (IS_ERR(vport->egress.acl)) {
		err = PTR_ERR(vport->egress.acl);
		vport->egress.acl = NULL;
		return err;
	}
	vport->egress.type = VPORT_EGRESS_ACL_TYPE_DEFAULT;

	err = esw_acl_egress_ofld_groups_create(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev, "vport[%d] configure egress rules\n", vport->vport);

	err = esw_acl_egress_ofld_rules_create(esw, vport, NULL);
	if (err)
		goto rules_err;

	return 0;

rules_err:
	esw_acl_egress_ofld_groups_destroy(vport);
group_err:
	esw_acl_egress_table_destroy(vport);
	return err;
}

void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_rules_destroy(vport);
	esw_acl_egress_ofld_groups_destroy(vport);
	esw_acl_egress_table_destroy(vport);
}

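/* Bond two vports at the egress ACL level (e.g. for VF LAG): the active
 * vport keeps its default egress rules, while the passive vport gets a
 * catch-all fwd2vport rule redirecting all its egress traffic to the
 * active vport.
 */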
int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
				   u16 passive_vport_num)
{
	struct mlx5_vport *passive_vport = mlx5_eswitch_get_vport(esw, passive_vport_num);
	struct mlx5_vport *active_vport = mlx5_eswitch_get_vport(esw, active_vport_num);
	struct mlx5_flow_destination fwd_dest = {};

	if (IS_ERR(active_vport))
		return PTR_ERR(active_vport);
	if (IS_ERR(passive_vport))
		return PTR_ERR(passive_vport);

	/* Cleanup and recreate rules WITHOUT fwd2vport of active vport */
	esw_acl_egress_ofld_rules_destroy(active_vport);
	esw_acl_egress_ofld_rules_create(esw, active_vport, NULL);

	/* Cleanup and recreate all rules + a fwd2vport rule on the passive vport
	 * to forward all its traffic to the active vport.
	 */
	esw_acl_egress_ofld_rules_destroy(passive_vport);
	fwd_dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	fwd_dest.vport.num = active_vport_num;
	fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	fwd_dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;

	return esw_acl_egress_ofld_rules_create(esw, passive_vport, &fwd_dest);
}

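/* Undo mlx5_esw_acl_egress_vport_bond(): drop the fwd2vport rule and
 * restore the default egress rules of the vport.
 */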
int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	esw_acl_egress_ofld_rules_destroy(vport);
	return esw_acl_egress_ofld_rules_create(esw, vport, NULL);
}