// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/list.h>
#include <linux/notifier.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "eswitch.h"
#include "bridge_priv.h"
#define CREATE_TRACE_POINTS
#include "diag/bridge_tracepoint.h"

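/* The bridge offload model uses one ingress FDB table shared by all bridges
 * and one egress FDB table per bridge. Each table is statically partitioned
 * into flow groups by the index ranges below: the ingress table is split
 * between a VLAN group (first quarter), a VLAN filter group (second quarter)
 * and a MAC-only group (second half); the egress table is split evenly
 * between a VLAN group and a MAC group.
 */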
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)

#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)

#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0

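/* Relative levels of the bridge tables within the FDB_BR_OFFLOAD priority.
 * A flow table can only forward to a table of a higher level, so ingress
 * rules may target either a bridge egress table or the skip table.
 */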
enum {
	MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
};

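/* FDB entries are indexed by {MAC, VLAN id} in the per-bridge rhashtable for
 * lookup on switchdev events, and also linked on per-bridge and per-VLAN
 * lists for flushing and aging walks.
 */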
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};

enum {
	MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
};

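/* Per-bridge state, created when the first eswitch vport is attached to a
 * bridge net device and reference counted by the number of attached vports.
 * The egress table is per-bridge, while the ingress and skip tables are
 * shared by all bridges via struct mlx5_esw_bridge_offloads.
 */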
struct mlx5_esw_bridge {
	int ifindex;
	int refcnt;
	struct list_head list;
	struct mlx5_esw_bridge_offloads *br_offloads;

	struct list_head fdb_list;
	struct rhashtable fdb_ht;
	struct xarray vports;

	struct mlx5_flow_table *egress_ft;
	struct mlx5_flow_group *egress_vlan_fg;
	struct mlx5_flow_group *egress_mac_fg;
	unsigned long ageing_time;
	u32 flags;
};

static void
mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
				   unsigned long val)
{
	struct switchdev_notifier_fdb_info send_info = {};

	send_info.addr = addr;
	send_info.vid = vid;
	send_info.offloaded = true;
	call_switchdev_notifiers(val, dev, &send_info.info, NULL);
}
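/* Allocate one of the bridge flow tables (ingress, egress or skip) in the
 * FDB_BR_OFFLOAD priority of the eswitch FDB namespace.
 */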
static struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		return ERR_PTR(-ENOENT);
	}

	ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft_attr.max_fte = max_fte;
	ft_attr.level = level;
	ft_attr.prio = FDB_BR_OFFLOAD;
	fdb = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb))
		esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));

	return fdb;
}

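/* The flow group create helpers below carve the statically sized index
 * ranges defined above out of the ingress/egress tables. Ingress groups
 * match on source MAC and the source vport metadata in reg_c_0; egress
 * groups match on destination MAC. The VLAN groups additionally match the
 * cvlan tag and first VLAN id, while the ingress filter group matches just
 * the cvlan tag.
 */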
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
			 PTR_ERR(fg));

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
					 struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
			 PTR_ERR(fg));

	kvfree(in);
	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
			 PTR_ERR(fg));

	kvfree(in);
	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}

static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
	struct mlx5_flow_table *ingress_ft, *skip_ft;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
		return -EOPNOTSUPP;

	ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
						  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
						  br_offloads->esw);
	if (IS_ERR(ingress_ft))
		return PTR_ERR(ingress_ft);

	skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
					       MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
					       br_offloads->esw);
	if (IS_ERR(skip_ft)) {
		err = PTR_ERR(skip_ft);
		goto err_skip_tbl;
	}

	vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft);
	if (IS_ERR(filter_fg)) {
		err = PTR_ERR(filter_fg);
		goto err_filter_fg;
	}

	mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	br_offloads->ingress_ft = ingress_ft;
	br_offloads->skip_ft = skip_ft;
	br_offloads->ingress_vlan_fg = vlan_fg;
	br_offloads->ingress_filter_fg = filter_fg;
	br_offloads->ingress_mac_fg = mac_fg;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(filter_fg);
err_filter_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(skip_ft);
err_skip_tbl:
	mlx5_destroy_flow_table(ingress_ft);
	return err;
}

static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
	br_offloads->ingress_mac_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
	br_offloads->ingress_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
	br_offloads->ingress_vlan_fg = NULL;
	mlx5_destroy_flow_table(br_offloads->skip_ft);
	br_offloads->skip_ft = NULL;
	mlx5_destroy_flow_table(br_offloads->ingress_ft);
	br_offloads->ingress_ft = NULL;
}

static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
				  struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_group *mac_fg, *vlan_fg;
	struct mlx5_flow_table *egress_ft;
	int err;

	egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
						 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
						 br_offloads->esw);
	if (IS_ERR(egress_ft))
		return PTR_ERR(egress_ft);

	vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	bridge->egress_ft = egress_ft;
	bridge->egress_vlan_fg = vlan_fg;
	bridge->egress_mac_fg = mac_fg;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(egress_ft);
	return err;
}

static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
	mlx5_destroy_flow_group(bridge->egress_mac_fg);
	mlx5_destroy_flow_group(bridge->egress_vlan_fg);
	mlx5_destroy_flow_table(bridge->egress_ft);
}

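/* Ingress FDB rule for a known MAC: match the source MAC and the source
 * vport metadata in reg_c_0, optionally push a VLAN header (pvid) or match
 * the packet VLAN id, count the packet for aging and forward it to the
 * per-bridge egress table.
 */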
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
				    struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
				    struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dests[2] = {};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	if (vlan && vlan->pkt_reformat_push) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act.pkt_reformat = vlan->pkt_reformat_push;
	} else if (vlan) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = bridge->egress_ft;
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dests[1].counter_id = counter_id;

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
				     ARRAY_SIZE(dests));

	kvfree(rule_spec);
	return handle;
}

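/* Per-MAC catch-all for tagged traffic, used when VLAN filtering is enabled:
 * it sits in the filter group, which follows the VLAN group, so it only
 * matches packets whose VLAN id has no dedicated ingress rule and steers
 * them to the skip table so they are not bridged.
 */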
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
					   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = br_offloads->skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
			 outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
			 outer_headers.cvlan_tag);

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

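/* Egress FDB rule: forward packets matching the destination MAC (and VLAN
 * id, when given) to the destination vport, popping the VLAN header first if
 * the VLAN is configured as untagged.
 */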
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr,
				   struct mlx5_esw_bridge_vlan *vlan,
				   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = vport_num,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (vlan) {
		if (vlan->pkt_reformat_pop) {
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act.pkt_reformat = vlan->pkt_reformat_pop;
		}

		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

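/* Allocate a bridge, create its egress table and FDB hashtable, and link it
 * into the offloads bridge list. The returned bridge holds a single
 * reference on behalf of the vport that triggered its creation.
 */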
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
						      struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	bridge->br_offloads = br_offloads;
	err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
	if (err)
		goto err_egress_tbl;

	err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
	if (err)
		goto err_fdb_ht;

	INIT_LIST_HEAD(&bridge->fdb_list);
	xa_init(&bridge->vports);
	bridge->ifindex = ifindex;
	bridge->refcnt = 1;
	bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
	list_add(&bridge->list, &br_offloads->bridges);

	return bridge;

err_fdb_ht:
	mlx5_esw_bridge_egress_table_cleanup(bridge);
err_egress_tbl:
	kvfree(bridge);
	return ERR_PTR(err);
}

static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
{
	bridge->refcnt++;
}

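/* Drop a vport's reference to the bridge. The last reference destroys the
 * bridge and, when no bridges remain, the shared ingress and skip tables.
 */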
static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
				struct mlx5_esw_bridge *bridge)
{
	if (--bridge->refcnt)
		return;

	mlx5_esw_bridge_egress_table_cleanup(bridge);
	WARN_ON(!xa_empty(&bridge->vports));
	list_del(&bridge->list);
	rhashtable_destroy(&bridge->fdb_ht);
	kvfree(bridge);

	if (list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
}

static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	ASSERT_RTNL();

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		if (bridge->ifindex == ifindex) {
			mlx5_esw_bridge_get(bridge);
			return bridge;
		}
	}

	if (!br_offloads->ingress_ft) {
		int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

		if (err)
			return ERR_PTR(err);
	}

	bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
	if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
	return bridge;
}

static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge *bridge)
{
	return xa_insert(&bridge->vports, port->vport_num, port, GFP_KERNEL);
}

static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_port_lookup(u16 vport_num, struct mlx5_esw_bridge *bridge)
{
	return xa_load(&bridge->vports, vport_num);
}

static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge *bridge)
{
	xa_erase(&bridge->vports, port->vport_num);
}

static void mlx5_esw_bridge_fdb_entry_refresh(unsigned long lastuse,
					      struct mlx5_esw_bridge_fdb_entry *entry)
{
	trace_mlx5_esw_bridge_fdb_entry_refresh(entry);

	entry->lastuse = lastuse;
	mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
					   entry->key.vid,
					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}

static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
				  struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);

	rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	mlx5_del_flow_rules(entry->egress_handle);
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
	mlx5_del_flow_rules(entry->ingress_handle);
	mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
	list_del(&entry->vlan_list);
	list_del(&entry->list);
	kvfree(entry);
}

static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
		if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
			mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
							   entry->key.vid,
							   SWITCHDEV_FDB_DEL_TO_BRIDGE);
		mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
	}
}

static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
{
	return xa_load(&port->vlans, vid);
}

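/* VLAN push/pop is implemented with packet reformats: INSERT_HEADER places
 * the 4-byte 802.1Q header at offsetof(struct vlan_ethhdr, h_vlan_proto),
 * i.e. right after the source MAC, and REMOVE_HEADER strips it from the same
 * offset.
 */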
static int
mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	struct {
		__be16	h_vlan_proto;
		__be16	h_vlan_TCI;
	} vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_pkt_reformat *pkt_reformat;

	if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
	    offsetof(struct vlan_ethhdr, h_vlan_proto)) {
		esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
		return -EOPNOTSUPP;
	}

	reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
	reformat_params.size = sizeof(vlan_hdr);
	reformat_params.data = &vlan_hdr;
	pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
						  &reformat_params,
						  MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(pkt_reformat)) {
		esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
			 PTR_ERR(pkt_reformat));
		return PTR_ERR(pkt_reformat);
	}

	vlan->pkt_reformat_push = pkt_reformat;
	return 0;
}

static void
mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
	vlan->pkt_reformat_push = NULL;
}

static int
mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_pkt_reformat *pkt_reformat;

	if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) ||
	    MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) ||
	    MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) <
	    offsetof(struct vlan_ethhdr, h_vlan_proto)) {
		esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
		return -EOPNOTSUPP;
	}

	reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
	reformat_params.size = sizeof(struct vlan_hdr);
	pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
						  &reformat_params,
						  MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(pkt_reformat)) {
		esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
			 PTR_ERR(pkt_reformat));
		return PTR_ERR(pkt_reformat);
	}

	vlan->pkt_reformat_pop = pkt_reformat;
	return 0;
}

static void
mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
	vlan->pkt_reformat_pop = NULL;
}

static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
			    struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_vlan *vlan;
	int err;

	vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	INIT_LIST_HEAD(&vlan->fdb_list);

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
		if (err)
			goto err_vlan_push;
	}
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
		if (err)
			goto err_vlan_pop;
	}

	err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
	if (err)
		goto err_xa_insert;

	trace_mlx5_esw_bridge_vlan_create(vlan);
	return vlan;

err_xa_insert:
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push:
	kvfree(vlan);
	return ERR_PTR(err);
}

static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	xa_erase(&port->vlans, vlan->vid);
}

static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
				       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
		if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
			mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
							   entry->key.vid,
							   SWITCHDEV_FDB_DEL_TO_BRIDGE);
		mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
	}

	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw);
}

static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}

static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
					     struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}

static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge *bridge,
				 struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, bridge);
	if (!port) {
		/* FDB is added asynchronously on wq while port might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan) {
		/* FDB is added asynchronously on wq while vlan might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
			 vport_num);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}

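/* Allocate an FDB entry, create its ingress (and optional filter) and egress
 * rules and the flow counter that drives aging, then index the entry in the
 * hashtable and link it on the per-bridge and per-VLAN lists.
 */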
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsigned char *addr,
			       u16 vid, bool added_by_user, struct mlx5_eswitch *esw,
			       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan = NULL;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_flow_handle *handle;
	struct mlx5_fc *counter;
	struct mlx5e_priv *priv;
	int err;

	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, bridge, esw);
		if (IS_ERR(vlan))
			return ERR_CAST(vlan);
	}

	priv = netdev_priv(dev);
	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	entry->dev = dev;
	entry->vport_num = vport_num;
	entry->lastuse = jiffies;
	if (added_by_user)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;

	counter = mlx5_fc_create(priv->mdev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_ingress_fc_create;
	}
	entry->ingress_counter = counter;

	handle = mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, mlx5_fc_id(counter),
						     bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;

	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
		handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
				 vport_num, err);
			goto err_ingress_filter_flow_create;
		}
		entry->filter_handle = handle;
	}

	handle = mlx5_esw_bridge_egress_flow_create(vport_num, addr, vlan, bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_egress_flow_create;
	}
	entry->egress_handle = handle;

	err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	if (err) {
		esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
		goto err_ht_init;
	}

	if (vlan)
		list_add(&entry->vlan_list, &vlan->fdb_list);
	else
		INIT_LIST_HEAD(&entry->vlan_list);
	list_add(&entry->list, &bridge->fdb_list);

	trace_mlx5_esw_bridge_fdb_entry_init(entry);
	return entry;

err_ht_init:
	mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
err_ingress_filter_flow_create:
	mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
	mlx5_fc_destroy(priv->mdev, entry->ingress_counter);
err_ingress_fc_create:
	kvfree(entry);
	return ERR_PTR(err);
}

int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	if (!vport->bridge)
		return -EINVAL;

	vport->bridge->ageing_time = ageing_time;
	return 0;
}

int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport)
{
	struct mlx5_esw_bridge *bridge;
	bool filtering;

	if (!vport->bridge)
		return -EINVAL;

	bridge = vport->bridge;
	filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
	if (filtering == enable)
		return 0;

	mlx5_esw_bridge_fdb_flush(bridge);
	if (enable)
		bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
	else
		bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;

	return 0;
}

static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads *br_offloads,
				      struct mlx5_esw_bridge *bridge,
				      struct mlx5_vport *vport)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_port *port;
	int err;

	port = kvzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		err = -ENOMEM;
		goto err_port_alloc;
	}

	port->vport_num = vport->vport;
	xa_init(&port->vlans);
	err = mlx5_esw_bridge_port_insert(port, bridge);
	if (err) {
		esw_warn(esw->dev, "Failed to insert port metadata (vport=%u,err=%d)\n",
			 vport->vport, err);
		goto err_port_insert;
	}
	trace_mlx5_esw_bridge_vport_init(port);

	vport->bridge = bridge;
	return 0;

err_port_insert:
	kvfree(port);
err_port_alloc:
	mlx5_esw_bridge_put(br_offloads, bridge);
	return err;
}

static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
					 struct mlx5_vport *vport)
{
	struct mlx5_esw_bridge *bridge = vport->bridge;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
	struct mlx5_esw_bridge_port *port;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		if (entry->vport_num == vport->vport)
			mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);

	port = mlx5_esw_bridge_port_lookup(vport->vport, bridge);
	if (!port) {
		WARN(1, "Vport %u metadata not found on bridge", vport->vport);
		return -EINVAL;
	}

	trace_mlx5_esw_bridge_vport_cleanup(port);
	mlx5_esw_bridge_port_vlans_flush(port, bridge);
	mlx5_esw_bridge_port_erase(port, bridge);
	kvfree(port);
	mlx5_esw_bridge_put(br_offloads, bridge);
	vport->bridge = NULL;
	return 0;
}

int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
			       struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	WARN_ON(vport->bridge);

	bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
	if (IS_ERR(bridge)) {
		NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
		return PTR_ERR(bridge);
	}

	err = mlx5_esw_bridge_vport_init(br_offloads, bridge, vport);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
	return err;
}

int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
				 struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge *bridge = vport->bridge;
	int err;

	if (!bridge) {
		NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
		return -EINVAL;
	}
	if (bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
		return -EINVAL;
	}

	err = mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
	return err;
}

int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
	if (!port)
		return -EINVAL;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;
		mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge);
	}

	vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, esw);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
		return PTR_ERR(vlan);
	}
	return 0;
}

void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
	if (!port)
		return;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan)
		return;
	mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge);
}

void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge *bridge = vport->bridge;
	struct mlx5_esw_bridge_fdb_entry *entry;
	u16 vport_num = vport->vport;

	if (!bridge) {
		esw_info(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
		return;
	}

	entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, fdb_info->addr, fdb_info->vid,
					       fdb_info->added_by_user, esw, bridge);
	if (IS_ERR(entry))
		return;

	if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_OFFLOADED);
	else
		/* Take over dynamic entries to prevent kernel bridge from aging them out. */
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}

void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge *bridge = vport->bridge;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_fdb_key key;
	u16 vport_num = vport->vport;

	if (!bridge) {
		esw_warn(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
		return;
	}

	ether_addr_copy(key.addr, fdb_info->addr);
	key.vid = fdb_info->vid;
	entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
	if (!entry) {
		esw_warn(esw->dev,
			 "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			 key.addr, key.vid, vport_num);
		return;
	}

	if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_DEL_TO_BRIDGE);
	mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}

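/* Periodic aging pass: an entry whose hardware counter reports more recent
 * activity than our cached timestamp is refreshed; a dynamic entry idle for
 * longer than the bridge ageing time is deleted. User-added entries are
 * never aged out.
 */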
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
	struct mlx5_esw_bridge *bridge;

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
			unsigned long lastuse =
				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);

			if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
				continue;

			if (time_after(lastuse, entry->lastuse)) {
				mlx5_esw_bridge_fdb_entry_refresh(lastuse, entry);
			} else if (time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
				mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
								   entry->key.vid,
								   SWITCHDEV_FDB_DEL_TO_BRIDGE);
				mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
			}
		}
	}
}

static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, vport)
		if (vport->bridge)
			mlx5_esw_bridge_vport_cleanup(br_offloads, vport);

	WARN_ONCE(!list_empty(&br_offloads->bridges),
		  "Cleaning up bridge offloads while still having bridges attached\n");
}

struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads;

	br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
	if (!br_offloads)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&br_offloads->bridges);
	br_offloads->esw = esw;
	esw->br_offloads = br_offloads;

	return br_offloads;
}

void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;

	if (!br_offloads)
		return;

	mlx5_esw_bridge_flush(br_offloads);

	esw->br_offloads = NULL;
	kvfree(br_offloads);
}