1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021 Mellanox Technologies. */
3 
4 #include <linux/build_bug.h>
5 #include <linux/list.h>
6 #include <linux/notifier.h>
7 #include <net/netevent.h>
8 #include <net/switchdev.h>
9 #include "lib/devcom.h"
10 #include "bridge.h"
11 #include "eswitch.h"
12 #include "bridge_priv.h"
13 #define CREATE_TRACE_POINTS
14 #include "diag/bridge_tracepoint.h"
15 
/* Parameters for the per-bridge FDB hashtable: entries are
 * struct mlx5_esw_bridge_fdb_entry, keyed by the embedded
 * struct mlx5_esw_bridge_fdb_key.
 */
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
22 
23 static void
24 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
25 				   unsigned long val)
26 {
27 	struct switchdev_notifier_fdb_info send_info = {};
28 
29 	send_info.addr = addr;
30 	send_info.vid = vid;
31 	send_info.offloaded = true;
32 	call_switchdev_notifiers(val, dev, &send_info.info, NULL);
33 }
34 
35 static void
36 mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
37 {
38 	if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
39 		mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
40 						   entry->key.vid,
41 						   SWITCHDEV_FDB_DEL_TO_BRIDGE);
42 }
43 
44 static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
45 {
46 	return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
47 		MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
48 		MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
49 		offsetof(struct vlan_ethhdr, h_vlan_proto);
50 }
51 
52 static struct mlx5_pkt_reformat *
53 mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
54 {
55 	struct mlx5_pkt_reformat_params reformat_params = {};
56 
57 	reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
58 	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
59 	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
60 	reformat_params.size = sizeof(struct vlan_hdr);
61 	return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
62 }
63 
64 struct mlx5_flow_table *
65 mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
66 {
67 	struct mlx5_flow_table_attr ft_attr = {};
68 	struct mlx5_core_dev *dev = esw->dev;
69 	struct mlx5_flow_namespace *ns;
70 	struct mlx5_flow_table *fdb;
71 
72 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
73 	if (!ns) {
74 		esw_warn(dev, "Failed to get FDB namespace\n");
75 		return ERR_PTR(-ENOENT);
76 	}
77 
78 	ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
79 	ft_attr.max_fte = max_fte;
80 	ft_attr.level = level;
81 	ft_attr.prio = FDB_BR_OFFLOAD;
82 	fdb = mlx5_create_flow_table(ns, &ft_attr);
83 	if (IS_ERR(fdb))
84 		esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
85 
86 	return fdb;
87 }
88 
/* Create an ingress flow group for tagged traffic: matches full source MAC,
 * the tag-present bit for @vlan_proto (802.1Q or 802.1ad), the first VLAN id,
 * and the source vport metadata in reg_c_0. Occupies indexes [@from, @to].
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					     struct mlx5_eswitch *esw,
					     struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit source MAC (split into two fields by the HW layout). */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	/* cvlan_tag marks an 802.1Q outer tag, svlan_tag an 802.1ad one. */
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	/* Source vport is identified by metadata in reg_c_0. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
			 vlan_proto, PTR_ERR(fg));

	return fg;
}
129 
130 static struct mlx5_flow_group *
131 mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
132 				       struct mlx5_flow_table *ingress_ft)
133 {
134 	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
135 	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
136 
137 	return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
138 }
139 
140 static struct mlx5_flow_group *
141 mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
142 				       struct mlx5_flow_table *ingress_ft)
143 {
144 	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
145 	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
146 
147 	return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
148 							    ingress_ft);
149 }
150 
/* Create an ingress "filter" flow group: like the VLAN group it matches
 * source MAC, the @vlan_proto tag-present bit and reg_c_0 vport metadata,
 * but NOT the VLAN id — so it catches tagged packets regardless of VID.
 * Occupies indexes [@from, @to].
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
						    u16 vlan_proto, struct mlx5_eswitch *esw,
						    struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit source MAC. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	/* Note: no first_vid criterion — any VID matches. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
188 
189 static struct mlx5_flow_group *
190 mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
191 					      struct mlx5_flow_table *ingress_ft)
192 {
193 	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
194 	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
195 
196 	return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
197 								   ingress_ft);
198 }
199 
200 static struct mlx5_flow_group *
201 mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
202 					      struct mlx5_flow_table *ingress_ft)
203 {
204 	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
205 	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
206 
207 	return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
208 								   ingress_ft);
209 }
210 
/* Create the ingress MAC flow group: matches source MAC and reg_c_0 vport
 * metadata only (no VLAN criteria), at its fixed index range.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit source MAC. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);

	/* Source vport is identified by metadata in reg_c_0. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
			 PTR_ERR(fg));

	kvfree(in);
	return fg;
}
246 
/* Create an egress flow group for tagged traffic: matches full destination
 * MAC, the tag-present bit for @vlan_proto and the first VLAN id. Outer
 * headers only — no vport metadata on egress. Occupies indexes [@from, @to].
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					    struct mlx5_eswitch *esw,
					    struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit destination MAC. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
282 
283 static struct mlx5_flow_group *
284 mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
285 {
286 	unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
287 	unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
288 
289 	return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
290 }
291 
292 static struct mlx5_flow_group *
293 mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
294 				      struct mlx5_flow_table *egress_ft)
295 {
296 	unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
297 	unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
298 
299 	return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
300 }
301 
/* Create the egress MAC flow group: matches destination MAC only (no VLAN
 * criteria), at its fixed index range.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit destination MAC. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
332 
/* Create the egress miss flow group: matches only the tunnel-mark bits of
 * reg_c_1 (ESW_TUN_MASK), at its fixed index range. Used by the miss flow
 * that undoes an ingress VLAN push (see the egress miss flow rule).
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table miss flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
362 
/* Initialize the shared bridge ingress and skip flow tables and the five
 * ingress flow groups (VLAN, VLAN filter, QinQ, QinQ filter, MAC), storing
 * them in @br_offloads on success. On failure the goto chain unwinds the
 * already-created objects in reverse creation order.
 *
 * Returns 0 on success or a negative errno.
 */
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
	struct mlx5_flow_table *ingress_ft, *skip_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	int err;

	/* All ingress groups match on reg_c_0 vport metadata, so the offload
	 * requires metadata-based vport matching to be enabled.
	 */
	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return -EOPNOTSUPP;

	ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
						  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
						  esw);
	if (IS_ERR(ingress_ft))
		return PTR_ERR(ingress_ft);

	skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
					       MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
					       esw);
	if (IS_ERR(skip_ft)) {
		err = PTR_ERR(skip_ft);
		goto err_skip_tbl;
	}

	vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_filter_fg)) {
		err = PTR_ERR(vlan_filter_fg);
		goto err_vlan_filter_fg;
	}

	qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_filter_fg)) {
		err = PTR_ERR(qinq_filter_fg);
		goto err_qinq_filter_fg;
	}

	mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* Publish everything only after full success. */
	br_offloads->ingress_ft = ingress_ft;
	br_offloads->skip_ft = skip_ft;
	br_offloads->ingress_vlan_fg = vlan_fg;
	br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
	br_offloads->ingress_qinq_fg = qinq_fg;
	br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
	br_offloads->ingress_mac_fg = mac_fg;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(qinq_filter_fg);
err_qinq_filter_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_filter_fg);
err_vlan_filter_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(skip_ft);
err_skip_tbl:
	mlx5_destroy_flow_table(ingress_ft);
	return err;
}
441 
/* Tear down the shared ingress/skip tables: flow groups are destroyed before
 * the table that contains them. Pointers are cleared to NULL so a later
 * bridge lookup can detect the tables are gone and re-initialize them.
 */
static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
	br_offloads->ingress_mac_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
	br_offloads->ingress_qinq_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
	br_offloads->ingress_qinq_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
	br_offloads->ingress_vlan_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
	br_offloads->ingress_vlan_fg = NULL;
	mlx5_destroy_flow_table(br_offloads->skip_ft);
	br_offloads->skip_ft = NULL;
	mlx5_destroy_flow_table(br_offloads->ingress_ft);
	br_offloads->ingress_ft = NULL;
}
460 
461 static struct mlx5_flow_handle *
462 mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
463 					struct mlx5_flow_table *skip_ft,
464 					struct mlx5_pkt_reformat *pkt_reformat);
465 
/* Initialize the per-bridge egress table with its VLAN, QinQ and MAC flow
 * groups. When the device supports VLAN-pop reformat, also set up the
 * best-effort miss path (miss group + REMOVE_HEADER reformat + miss rule);
 * any failure there is only warned about and the bridge proceeds without it.
 *
 * Returns 0 on success or a negative errno (only for the mandatory objects).
 */
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
				  struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
	struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
	struct mlx5_flow_handle *miss_handle = NULL;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_table *egress_ft;
	int err;

	egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
						 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
						 esw);
	if (IS_ERR(egress_ft))
		return PTR_ERR(egress_ft);

	vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* Optional miss path: on any failure, undo the partial miss objects,
	 * reset the pointers to NULL and continue without the miss flow.
	 */
	if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
		miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
		if (IS_ERR(miss_fg)) {
			esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
				 PTR_ERR(miss_fg));
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
		if (IS_ERR(miss_pkt_reformat)) {
			esw_warn(esw->dev,
				 "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
				 PTR_ERR(miss_pkt_reformat));
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
								      br_offloads->skip_ft,
								      miss_pkt_reformat);
		if (IS_ERR(miss_handle)) {
			esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
				 PTR_ERR(miss_handle));
			miss_handle = NULL;
			mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}
	}
skip_miss_flow:

	bridge->egress_ft = egress_ft;
	bridge->egress_vlan_fg = vlan_fg;
	bridge->egress_qinq_fg = qinq_fg;
	bridge->egress_mac_fg = mac_fg;
	bridge->egress_miss_fg = miss_fg;
	bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
	bridge->egress_miss_handle = miss_handle;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(egress_ft);
	return err;
}
554 
/* Tear down the per-bridge egress table. The miss objects are optional
 * (NULL when VLAN-pop is unsupported or their setup failed), so each is
 * destroyed only if present; then groups, then the table itself.
 */
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
	if (bridge->egress_miss_handle)
		mlx5_del_flow_rules(bridge->egress_miss_handle);
	if (bridge->egress_miss_pkt_reformat)
		mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
					     bridge->egress_miss_pkt_reformat);
	if (bridge->egress_miss_fg)
		mlx5_destroy_flow_group(bridge->egress_miss_fg);
	mlx5_destroy_flow_group(bridge->egress_mac_fg);
	mlx5_destroy_flow_group(bridge->egress_qinq_fg);
	mlx5_destroy_flow_group(bridge->egress_vlan_fg);
	mlx5_destroy_flow_table(bridge->egress_ft);
}
570 
/* Add an ingress FDB rule for source MAC @addr coming from @vport_num (vport
 * metadata taken from @esw, which may be the peer eswitch). The rule forwards
 * to the bridge's egress table and counts hits in @counter_id.
 *
 * If @vlan has a push reformat configured, the rule pushes the VLAN and marks
 * the packet via a modify-header; otherwise, for a non-NULL @vlan, it matches
 * the bridge's VLAN protocol tag and @vlan->vid instead.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
					     struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
					     struct mlx5_esw_bridge *bridge,
					     struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dests[2] = {};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact match on the full source MAC. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport via reg_c_0 metadata of @esw. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

	if (vlan && vlan->pkt_reformat_push) {
		/* Push the VLAN header and mark the packet via modify-header. */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.pkt_reformat = vlan->pkt_reformat_push;
		flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
	} else if (vlan) {
		/* Match already-tagged traffic of the bridge's VLAN protocol. */
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	/* Forward to the bridge egress table and count. */
	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = bridge->egress_ft;
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dests[1].counter_id = counter_id;

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
				     ARRAY_SIZE(dests));

	kvfree(rule_spec);
	return handle;
}
639 
640 static struct mlx5_flow_handle *
641 mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
642 				    struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
643 				    struct mlx5_esw_bridge *bridge)
644 {
645 	return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
646 							    bridge, bridge->br_offloads->esw);
647 }
648 
649 static struct mlx5_flow_handle *
650 mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
651 					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
652 					 struct mlx5_esw_bridge *bridge)
653 {
654 	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
655 	static struct mlx5_flow_handle *handle;
656 	struct mlx5_eswitch *peer_esw;
657 
658 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
659 	if (!peer_esw)
660 		return ERR_PTR(-ENODEV);
661 
662 	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
663 							      bridge, peer_esw);
664 
665 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
666 	return handle;
667 }
668 
/* Add an ingress filter rule: any packet from @vport_num with source MAC
 * @addr that carries a tag of the bridge's VLAN protocol (any VID) is
 * forwarded to the skip table instead of the bridge egress table.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
					   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = br_offloads->skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact match on the full source MAC. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport via reg_c_0 metadata. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	/* Tag-present match only — no VID criterion. */
	if (bridge->vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (bridge->vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
721 
/* Add an egress FDB rule: packets with destination MAC @addr (and, when
 * @vlan is set, the bridge's VLAN protocol tag with @vlan->vid) are forwarded
 * to @vport_num. If @vlan has a pop reformat, the VLAN header is stripped.
 * On merged-eswitch devices, the destination also carries @esw_owner_vhca_id.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
				   struct mlx5_esw_bridge_vlan *vlan,
				   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = vport_num,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	/* For uplink-bound rules, tag the flow source as local vport when the
	 * device supports flow-source hints.
	 */
	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Exact match on the full destination MAC. */
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (vlan) {
		if (vlan->pkt_reformat_pop) {
			/* Strip the VLAN header on the way out. */
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act.pkt_reformat = vlan->pkt_reformat_pop;
		}

		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
788 
/* Add the egress miss rule: packets whose reg_c_1 tunnel-mark bits equal
 * ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK (i.e. a VLAN was pushed on ingress
 * but no egress rule matched) have the pushed VLAN removed via @pkt_reformat
 * and are forwarded to @skip_ft.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
					struct mlx5_flow_table *skip_ft,
					struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
		.flags = FLOW_ACT_NO_APPEND,
		.pkt_reformat = pkt_reformat,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);

	handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
823 
/* Allocate and initialize a bridge object for @ifindex: egress flow table,
 * FDB hashtable, MDB state, and defaults (refcnt 1, default ageing time,
 * 802.1Q VLAN protocol). The new bridge is linked into br_offloads->bridges.
 *
 * Returns the bridge or an ERR_PTR; the goto chain unwinds partial init.
 */
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
						      struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	bridge->br_offloads = br_offloads;
	err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
	if (err)
		goto err_egress_tbl;

	err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
	if (err)
		goto err_fdb_ht;

	err = mlx5_esw_bridge_mdb_init(bridge);
	if (err)
		goto err_mdb_ht;

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->ifindex = ifindex;
	bridge->refcnt = 1;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	bridge->vlan_proto = ETH_P_8021Q;
	list_add(&bridge->list, &br_offloads->bridges);

	return bridge;

err_mdb_ht:
	rhashtable_destroy(&bridge->fdb_ht);
err_fdb_ht:
	mlx5_esw_bridge_egress_table_cleanup(bridge);
err_egress_tbl:
	kvfree(bridge);
	return ERR_PTR(err);
}
864 
865 static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
866 {
867 	bridge->refcnt++;
868 }
869 
/* Drop a reference on @bridge and destroy it when the count reaches zero.
 * Teardown releases per-bridge HW/SW state and, once the last bridge is
 * gone, the shared ingress table as well.
 */
static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
				struct mlx5_esw_bridge *bridge)
{
	if (--bridge->refcnt)
		return;

	mlx5_esw_bridge_egress_table_cleanup(bridge);
	mlx5_esw_bridge_mcast_disable(bridge);
	/* Unlink before the list_empty() check below so this bridge no longer
	 * counts as a user of the shared ingress table.
	 */
	list_del(&bridge->list);
	mlx5_esw_bridge_mdb_cleanup(bridge);
	rhashtable_destroy(&bridge->fdb_ht);
	kvfree(bridge);

	/* Last bridge released the shared ingress table. */
	if (list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
}
886 
/* Find the bridge instance for @ifindex, taking a reference, or create a new
 * one if none exists. Creating the first bridge also lazily initializes the
 * shared ingress table. Returns ERR_PTR on failure. Requires RTNL.
 */
static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	ASSERT_RTNL();

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		if (bridge->ifindex == ifindex) {
			mlx5_esw_bridge_get(bridge);
			return bridge;
		}
	}

	/* First bridge being offloaded: bring up the shared ingress table. */
	if (!br_offloads->ingress_ft) {
		int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

		if (err)
			return ERR_PTR(err);
	}

	bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
	/* If creation failed and no other bridge uses the ingress table,
	 * tear it back down to undo the lazy init above.
	 */
	if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
	return bridge;
}
913 
914 static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
915 {
916 	return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
917 }
918 
/* Compute the ports-xarray key for an existing port object. */
unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
}
923 
924 static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
925 				       struct mlx5_esw_bridge_offloads *br_offloads)
926 {
927 	return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
928 }
929 
930 static struct mlx5_esw_bridge_port *
931 mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
932 			    struct mlx5_esw_bridge_offloads *br_offloads)
933 {
934 	return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
935 									       esw_owner_vhca_id));
936 }
937 
938 static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
939 				       struct mlx5_esw_bridge_offloads *br_offloads)
940 {
941 	xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
942 }
943 
944 static struct mlx5_esw_bridge *
945 mlx5_esw_bridge_from_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
946 				 struct mlx5_esw_bridge_offloads *br_offloads)
947 {
948 	struct mlx5_esw_bridge_port *port;
949 
950 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
951 	if (!port)
952 		return NULL;
953 
954 	return port->bridge;
955 }
956 
/* Re-announce an active offloaded FDB entry to the kernel bridge so it is
 * not aged out there while HW still sees traffic for it.
 */
static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
{
	trace_mlx5_esw_bridge_fdb_entry_refresh(entry);

	mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
					   entry->key.vid,
					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
965 
/* Tear down one offloaded FDB entry: remove it from the hashtable, delete
 * its HW flow rules and counter, unlink it from the per-vlan and per-bridge
 * lists and free it. Does NOT notify the kernel bridge — use
 * mlx5_esw_bridge_fdb_entry_notify_and_cleanup() for that.
 */
static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
				  struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);

	/* Remove from lookup structures first, then delete HW rules before
	 * destroying the counter they reference.
	 */
	rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	mlx5_del_flow_rules(entry->egress_handle);
	/* filter_handle only exists when VLAN filtering was enabled. */
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
	mlx5_del_flow_rules(entry->ingress_handle);
	mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
	list_del(&entry->vlan_list);
	list_del(&entry->list);
	kvfree(entry);
}
982 
/* Notify the kernel bridge that the FDB entry is no longer offloaded (unless
 * it is user-added or a peer entry — see mlx5_esw_bridge_fdb_del_notify())
 * and then destroy it.
 */
static void
mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
					     struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_fdb_del_notify(entry);
	mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
990 
/* Remove every offloaded FDB entry of @bridge, notifying the kernel bridge
 * for each one. Safe iteration: cleanup unlinks entries from fdb_list.
 */
static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
998 
/* Look up the vlan object configured on @port for @vid; NULL if absent. */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
{
	return xa_load(&port->vlans, vid);
}
1004 
1005 static int
1006 mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
1007 				 struct mlx5_eswitch *esw)
1008 {
1009 	struct {
1010 		__be16	h_vlan_proto;
1011 		__be16	h_vlan_TCI;
1012 	} vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
1013 	struct mlx5_pkt_reformat_params reformat_params = {};
1014 	struct mlx5_pkt_reformat *pkt_reformat;
1015 
1016 	if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
1017 	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
1018 	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
1019 	    offsetof(struct vlan_ethhdr, h_vlan_proto)) {
1020 		esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
1021 		return -EOPNOTSUPP;
1022 	}
1023 
1024 	reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
1025 	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
1026 	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
1027 	reformat_params.size = sizeof(vlan_hdr);
1028 	reformat_params.data = &vlan_hdr;
1029 	pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
1030 						  &reformat_params,
1031 						  MLX5_FLOW_NAMESPACE_FDB);
1032 	if (IS_ERR(pkt_reformat)) {
1033 		esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
1034 			 PTR_ERR(pkt_reformat));
1035 		return PTR_ERR(pkt_reformat);
1036 	}
1037 
1038 	vlan->pkt_reformat_push = pkt_reformat;
1039 	return 0;
1040 }
1041 
/* Release the VLAN-push reformat and clear the pointer so error/flush paths
 * that test it skip this cleanup.
 */
static void
mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
	vlan->pkt_reformat_push = NULL;
}
1048 
1049 static int
1050 mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1051 {
1052 	struct mlx5_pkt_reformat *pkt_reformat;
1053 
1054 	if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
1055 		esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
1056 		return -EOPNOTSUPP;
1057 	}
1058 
1059 	pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
1060 	if (IS_ERR(pkt_reformat)) {
1061 		esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
1062 			 PTR_ERR(pkt_reformat));
1063 		return PTR_ERR(pkt_reformat);
1064 	}
1065 
1066 	vlan->pkt_reformat_pop = pkt_reformat;
1067 	return 0;
1068 }
1069 
/* Release the VLAN-pop reformat and clear the pointer so error/flush paths
 * that test it skip this cleanup.
 */
static void
mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
	vlan->pkt_reformat_pop = NULL;
}
1076 
/* Allocate a modify-header action that writes the "VLAN was pushed on
 * ingress" mark into metadata register C1, so the egress path can match on
 * it (see the reg_c_1 match in the egress miss rule). Stores the action in
 * @vlan->pkt_mod_hdr_push_mark.
 */
static int
mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *pkt_mod_hdr;

	/* Single SET action on REG_C_1: write ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN
	 * into the tunnel opts/id field. Offset 8 / length matching
	 * ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS — presumably this is the
	 * register layout defined by the ESW_TUN_* scheme in eswitch.h;
	 * confirm against those definitions before changing.
	 */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(set_action_in, action, offset, 8);
	MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
	MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);

	pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
	if (IS_ERR(pkt_mod_hdr))
		return PTR_ERR(pkt_mod_hdr);

	vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
	return 0;
}
1096 
/* Release the push-mark modify-header action and clear the pointer so
 * error/flush paths that test it skip this cleanup.
 */
static void
mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
	vlan->pkt_mod_hdr_push_mark = NULL;
}
1103 
/* Set up per-vlan multicast state for a push/pop vlan (thin wrapper kept for
 * naming symmetry with the other push/pop create/cleanup helpers).
 */
static int
mlx5_esw_bridge_vlan_push_pop_fhs_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	return mlx5_esw_bridge_vlan_mcast_init(vlan_proto, port, vlan);
}
1110 
/* Counterpart of mlx5_esw_bridge_vlan_push_pop_fhs_create(). */
static void
mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
}
1116 
/* Create the HW actions a vlan needs according to its bridge flags:
 * PVID -> push reformat + reg_c_1 mark, UNTAGGED -> pop reformat + per-vlan
 * mcast handle. The error path unwinds conditionally by testing which
 * pointers were populated (@vlan starts zeroed), since either flag may be
 * absent.
 */
static int
mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_port *port,
				     struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	int err;

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
		if (err)
			return err;

		err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
		if (err)
			goto err_vlan_push_mark;
	}

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
		if (err)
			goto err_vlan_pop;

		err = mlx5_esw_bridge_vlan_push_pop_fhs_create(vlan_proto, port, vlan);
		if (err)
			goto err_vlan_pop_fhs;
	}

	return 0;

err_vlan_pop_fhs:
	mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
	/* Push-side state only exists if the PVID branch ran. */
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
err_vlan_push_mark:
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
	return err;
}
1155 
/* Allocate a vlan object for @vid on @port, create its push/pop HW actions
 * per @flags and register it in the port's vlans xarray. Returns the vlan or
 * ERR_PTR on failure.
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
			    struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_vlan *vlan;
	int err;

	vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	INIT_LIST_HEAD(&vlan->fdb_list);

	err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, port, vlan, esw);
	if (err)
		goto err_vlan_push_pop;

	err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
	if (err)
		goto err_xa_insert;

	trace_mlx5_esw_bridge_vlan_create(vlan);
	return vlan;

err_xa_insert:
	/* Unwind only what push_pop_create() actually set up for these flags;
	 * the vlan struct was zero-initialized, so NULL checks are reliable.
	 */
	if (vlan->mcast_handle)
		mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push_pop:
	kvfree(vlan);
	return ERR_PTR(err);
}
1195 
/* Remove @vlan from the port's vlans xarray. Does not free the vlan. */
static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	xa_erase(&port->vlans, vlan->vid);
}
1201 
/* Drop everything attached to @vlan — its FDB entries (with bridge
 * notification), MDB state and HW push/pop actions — but keep the vlan
 * object itself registered. Used both on vlan removal and when vlans are
 * recreated after a vlan_proto change.
 */
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan,
				       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
	mlx5_esw_bridge_port_mdb_vlan_flush(port, vlan);

	/* Each action is conditional on flags, so test what actually exists. */
	if (vlan->mcast_handle)
		mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
1222 
/* Fully destroy @vlan: flush its attached state, unregister it from the
 * port and free it.
 */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
1232 
/* Destroy every vlan configured on @port. xa_for_each tolerates entries
 * being erased during iteration.
 */
static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
					     struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
1242 
/* Rebuild the HW push/pop actions of every vlan on @port using the bridge's
 * current vlan_proto. Called after the protocol changes: each vlan is
 * flushed (FDB entries and old actions dropped) and its actions recreated.
 * Stops on first failure, leaving that vlan flushed but action-less.
 */
static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long i;
	int err;

	xa_for_each(&port->vlans, i, vlan) {
		mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
		err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, port,
							   vlan, br_offloads->esw);
		if (err) {
			esw_warn(br_offloads->esw->dev,
				 "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
				 vlan->vid, bridge->vlan_proto, port->vport_num,
				 err);
			return err;
		}
	}

	return 0;
}
1266 
1267 static int
1268 mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
1269 {
1270 	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
1271 	struct mlx5_esw_bridge_port *port;
1272 	unsigned long i;
1273 	int err;
1274 
1275 	xa_for_each(&br_offloads->ports, i, port) {
1276 		if (port->bridge != bridge)
1277 			continue;
1278 
1279 		err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
1280 		if (err)
1281 			return err;
1282 	}
1283 
1284 	return 0;
1285 }
1286 
/* Resolve the vlan object for (@vid, @vport_num, @esw_owner_vhca_id) under
 * @bridge. Returns ERR_PTR(-EINVAL) when the port or vlan no longer exists;
 * that is an expected race with async FDB processing, hence the non-warning
 * log level.
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
	if (!port) {
		/* FDB is added asynchronously on wq while port might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan) {
		/* FDB is added asynchronously on wq while vlan might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
			 vport_num);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}
1315 
/* Find the offloaded FDB entry for (@addr, @vid) in the bridge hashtable;
 * NULL if not offloaded. The key struct is zero-initialized so any padding
 * in the hashed key region is deterministic.
 */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
			   const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge_fdb_key key = {};

	ether_addr_copy(key.addr, addr);
	key.vid = vid;
	return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
}
1326 
/* Create and offload one FDB entry for (@addr, @vid) on @vport_num:
 * allocates the entry, a HW flow counter, the ingress rule (peer variant
 * for merged-eswitch peer ports), an optional ingress VLAN-filter rule and
 * the egress rule, then registers the entry in the hashtable and lists.
 * An existing entry for the same key is torn down first (replace semantics).
 * Returns the new entry or ERR_PTR.
 */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
			       const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
			       struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan = NULL;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_flow_handle *handle;
	struct mlx5_fc *counter;
	int err;

	/* With VLAN filtering on, a tagged entry must reference the port's
	 * vlan object (for push/pop actions in the flow rules).
	 */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
							esw);
		if (IS_ERR(vlan))
			return ERR_CAST(vlan);
	}

	/* Replace semantics: drop any stale entry for the same MAC/vid. */
	entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
	if (entry)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	entry->dev = dev;
	entry->vport_num = vport_num;
	entry->esw_owner_vhca_id = esw_owner_vhca_id;
	entry->lastuse = jiffies;
	if (added_by_user)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
	if (peer)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;

	/* Counter backs the activity tracking in mlx5_esw_bridge_update(). */
	counter = mlx5_fc_create(esw->dev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_ingress_fc_create;
	}
	entry->ingress_counter = counter;

	handle = peer ?
		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
							 mlx5_fc_id(counter), bridge) :
		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
						    mlx5_fc_id(counter), bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;

	/* Filter rule only exists when VLAN filtering is enabled; the
	 * cleanup paths rely on filter_handle being NULL otherwise.
	 */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
		handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
				 vport_num, err);
			goto err_ingress_filter_flow_create;
		}
		entry->filter_handle = handle;
	}

	handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
						    bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_egress_flow_create;
	}
	entry->egress_handle = handle;

	err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	if (err) {
		esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
		goto err_ht_init;
	}

	/* Untagged entries are not linked into any vlan's list; initialize
	 * the node so cleanup's unconditional list_del() stays safe.
	 */
	if (vlan)
		list_add(&entry->vlan_list, &vlan->fdb_list);
	else
		INIT_LIST_HEAD(&entry->vlan_list);
	list_add(&entry->list, &bridge->fdb_list);

	trace_mlx5_esw_bridge_fdb_entry_init(entry);
	return entry;

err_ht_init:
	mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
err_ingress_filter_flow_create:
	mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
	mlx5_fc_destroy(esw->dev, entry->ingress_counter);
err_ingress_fc_create:
	kvfree(entry);
	return ERR_PTR(err);
}
1433 
1434 int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
1435 				    struct mlx5_esw_bridge_offloads *br_offloads)
1436 {
1437 	struct mlx5_esw_bridge *bridge;
1438 
1439 	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1440 	if (!bridge)
1441 		return -EINVAL;
1442 
1443 	bridge->ageing_time = clock_t_to_jiffies(ageing_time);
1444 	return 0;
1445 }
1446 
1447 int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
1448 				       struct mlx5_esw_bridge_offloads *br_offloads)
1449 {
1450 	struct mlx5_esw_bridge *bridge;
1451 	bool filtering;
1452 
1453 	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1454 	if (!bridge)
1455 		return -EINVAL;
1456 
1457 	filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1458 	if (filtering == enable)
1459 		return 0;
1460 
1461 	mlx5_esw_bridge_fdb_flush(bridge);
1462 	mlx5_esw_bridge_mdb_flush(bridge);
1463 	if (enable)
1464 		bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1465 	else
1466 		bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1467 
1468 	return 0;
1469 }
1470 
/* Change the VLAN protocol (802.1Q or 802.1ad) of the bridge owning the
 * given vport. All offloaded FDB/MDB entries are flushed and the per-vlan
 * push/pop actions recreated for the new protocol.
 */
int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
				   struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id,
						  br_offloads);
	if (!bridge)
		return -EINVAL;

	if (bridge->vlan_proto == proto)
		return 0;
	if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
		esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
		return -EOPNOTSUPP;
	}

	mlx5_esw_bridge_fdb_flush(bridge);
	mlx5_esw_bridge_mdb_flush(bridge);
	bridge->vlan_proto = proto;
	/* Recreate return value is ignored: per-vlan failures are already
	 * logged inside, and the protocol change itself has taken effect —
	 * presumably best-effort by design; confirm before propagating.
	 */
	mlx5_esw_bridge_vlans_recreate(bridge);

	return 0;
}
1495 
/* Enable or disable multicast offload on the bridge owning the given vport.
 * Requires FW support for multi-path any-table, uplink hairpin and
 * ignore_flow_level; otherwise returns -EOPNOTSUPP.
 */
int mlx5_esw_bridge_mcast_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
			      struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge *bridge;
	int err = 0;
	bool mcast;

	if (!(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table) ||
	      MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table_limit_regc)) ||
	    !MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_uplink_hairpin) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
		return -EOPNOTSUPP;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return -EINVAL;

	/* No-op if the requested state already matches. */
	mcast = bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG;
	if (mcast == enable)
		return 0;

	if (enable)
		err = mlx5_esw_bridge_mcast_enable(bridge);
	else
		mlx5_esw_bridge_mcast_disable(bridge);

	return err;
}
1525 
/* Allocate a bridge port object for @vport_num, set up its multicast state
 * and register it in the offloads ports xarray. On success the port holds
 * the caller's bridge reference (stored in port->bridge).
 */
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_port *port;
	int err;

	port = kvzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->vport_num = vport_num;
	port->esw_owner_vhca_id = esw_owner_vhca_id;
	port->bridge = bridge;
	port->flags |= flags;
	xa_init(&port->vlans);

	err = mlx5_esw_bridge_port_mcast_init(port);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to initialize port multicast (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
			 port->vport_num, port->esw_owner_vhca_id, err);
		goto err_port_mcast;
	}

	err = mlx5_esw_bridge_port_insert(port, br_offloads);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
			 port->vport_num, port->esw_owner_vhca_id, err);
		goto err_port_insert;
	}
	trace_mlx5_esw_bridge_vport_init(port);

	return 0;

err_port_insert:
	mlx5_esw_bridge_port_mcast_cleanup(port);
err_port_mcast:
	kvfree(port);
	return err;
}
1569 
/* Detach @port from its bridge: remove the port's FDB entries (without
 * bridge notification — the port is going away), flush its vlans, tear down
 * multicast state, unregister and free the port, then drop the bridge
 * reference the port held. Always returns 0.
 */
static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
					 struct mlx5_esw_bridge_port *port)
{
	u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	/* Only entries learned/added on this specific port are removed. */
	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
			mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);

	trace_mlx5_esw_bridge_vport_cleanup(port);
	mlx5_esw_bridge_port_vlans_flush(port, bridge);
	mlx5_esw_bridge_port_mcast_cleanup(port);
	mlx5_esw_bridge_port_erase(port, br_offloads);
	kvfree(port);
	/* May destroy the bridge if this was its last port reference. */
	mlx5_esw_bridge_put(br_offloads, bridge);
	return 0;
}
1589 
/* Attach a vport to the bridge with netdevice @ifindex, creating the bridge
 * on first use. @flags marks e.g. peer-eswitch ports. On port-init failure
 * the bridge reference taken by the lookup is dropped again.
 */
static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
						 u16 flags,
						 struct mlx5_esw_bridge_offloads *br_offloads,
						 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
	if (IS_ERR(bridge)) {
		NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
		return PTR_ERR(bridge);
	}

	err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
		goto err_vport;
	}
	return 0;

err_vport:
	mlx5_esw_bridge_put(br_offloads, bridge);
	return err;
}
1615 
/* Attach a local (non-peer) vport to the bridge with netdevice @ifindex. */
int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
			       struct mlx5_esw_bridge_offloads *br_offloads,
			       struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0,
						     br_offloads, extack);
}
1623 
1624 int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
1625 				 struct mlx5_esw_bridge_offloads *br_offloads,
1626 				 struct netlink_ext_ack *extack)
1627 {
1628 	struct mlx5_esw_bridge_port *port;
1629 	int err;
1630 
1631 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1632 	if (!port) {
1633 		NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
1634 		return -EINVAL;
1635 	}
1636 	if (port->bridge->ifindex != ifindex) {
1637 		NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
1638 		return -EINVAL;
1639 	}
1640 
1641 	err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1642 	if (err)
1643 		NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
1644 	return err;
1645 }
1646 
1647 int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
1648 				    struct mlx5_esw_bridge_offloads *br_offloads,
1649 				    struct netlink_ext_ack *extack)
1650 {
1651 	if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
1652 		return 0;
1653 
1654 	return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id,
1655 						     MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
1656 						     br_offloads, extack);
1657 }
1658 
/* Detach a peer-eswitch vport; identical to the local unlink path (the
 * lookup is a no-op when peer linking was skipped on non-merged eswitch).
 */
int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads,
					    extack);
}
1666 
/* Add (or re-add with different flags) vlan @vid on the given bridge port.
 * An existing vlan with identical flags is left untouched; otherwise it is
 * destroyed and recreated so the HW push/pop actions match the new flags.
 */
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
				  struct mlx5_esw_bridge_offloads *br_offloads,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;
		/* Flags changed: rebuild the vlan from scratch below. */
		mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
	}

	vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
					   br_offloads->esw);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
		return PTR_ERR(vlan);
	}
	return 0;
}
1693 
1694 void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
1695 				   struct mlx5_esw_bridge_offloads *br_offloads)
1696 {
1697 	struct mlx5_esw_bridge_port *port;
1698 	struct mlx5_esw_bridge_vlan *vlan;
1699 
1700 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1701 	if (!port)
1702 		return;
1703 
1704 	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1705 	if (!vlan)
1706 		return;
1707 	mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
1708 }
1709 
/* Mark the offloaded FDB entry for @fdb_info as recently used, resetting
 * its software ageing timestamp. Missing entries are expected (async
 * processing races) and only logged at debug level. @dev is unused here but
 * keeps the signature parallel with the other fdb notifier handlers.
 */
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				     struct mlx5_esw_bridge_offloads *br_offloads,
				     struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return;

	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_debug(br_offloads->esw->dev,
			  "FDB update entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			  fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	entry->lastuse = jiffies;
}
1731 
/* Offload an FDB entry from a switchdev notification. On success the kernel
 * bridge is informed: user-added entries get SWITCHDEV_FDB_OFFLOADED,
 * dynamically learned local entries are taken over via
 * SWITCHDEV_FDB_ADD_TO_BRIDGE so the kernel bridge stops ageing them; peer
 * entries get no notification. Failures are silently dropped (entry_init
 * already logs them).
 */
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr,
					       fdb_info->vid, fdb_info->added_by_user,
					       port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
					       br_offloads->esw, bridge);
	if (IS_ERR(entry))
		return;

	if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_OFFLOADED);
	else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER))
		/* Take over dynamic entries to prevent kernel bridge from aging them out. */
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1760 
/* Remove an offloaded FDB entry in response to a switchdev del
 * notification. Missing entries are an expected race and only logged at
 * debug level.
 */
void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return;

	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_debug(esw->dev,
			  "FDB remove entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			  fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
1783 
/* Periodic ageing pass over all offloaded FDB entries. Entries whose HW
 * counter saw traffic since the last pass are refreshed towards the kernel
 * bridge; idle non-peer entries past the bridge's ageing time are removed.
 * User-added entries never age.
 */
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
	struct mlx5_esw_bridge *bridge;

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
			/* HW-side last-activity timestamp in jiffies. */
			unsigned long lastuse =
				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);

			if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
				continue;

			if (time_after(lastuse, entry->lastuse))
				mlx5_esw_bridge_fdb_entry_refresh(entry);
			else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
				 time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
				mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
		}
	}
}
1805 
1806 int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1807 				 const unsigned char *addr, u16 vid,
1808 				 struct mlx5_esw_bridge_offloads *br_offloads,
1809 				 struct netlink_ext_ack *extack)
1810 {
1811 	struct mlx5_esw_bridge_vlan *vlan;
1812 	struct mlx5_esw_bridge_port *port;
1813 	struct mlx5_esw_bridge *bridge;
1814 	int err;
1815 
1816 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1817 	if (!port) {
1818 		esw_warn(br_offloads->esw->dev,
1819 			 "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
1820 			 addr, vport_num);
1821 		NL_SET_ERR_MSG_FMT_MOD(extack,
1822 				       "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
1823 				       addr, vport_num);
1824 		return -EINVAL;
1825 	}
1826 
1827 	bridge = port->bridge;
1828 	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
1829 		vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1830 		if (!vlan) {
1831 			esw_warn(br_offloads->esw->dev,
1832 				 "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
1833 				 addr, vid, vport_num);
1834 			NL_SET_ERR_MSG_FMT_MOD(extack,
1835 					       "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
1836 					       addr, vid, vport_num);
1837 			return -EINVAL;
1838 		}
1839 	}
1840 
1841 	err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
1842 	if (err) {
1843 		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
1844 				       addr, vid, vport_num);
1845 		return err;
1846 	}
1847 
1848 	return 0;
1849 }
1850 
1851 void mlx5_esw_bridge_port_mdb_del(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1852 				  const unsigned char *addr, u16 vid,
1853 				  struct mlx5_esw_bridge_offloads *br_offloads)
1854 {
1855 	struct mlx5_esw_bridge_port *port;
1856 
1857 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1858 	if (!port)
1859 		return;
1860 
1861 	mlx5_esw_bridge_port_mdb_detach(dev, port, addr, vid);
1862 }
1863 
1864 static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
1865 {
1866 	struct mlx5_esw_bridge_port *port;
1867 	unsigned long i;
1868 
1869 	xa_for_each(&br_offloads->ports, i, port)
1870 		mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1871 
1872 	WARN_ONCE(!list_empty(&br_offloads->bridges),
1873 		  "Cleaning up bridge offloads while still having bridges attached\n");
1874 }
1875 
1876 struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
1877 {
1878 	struct mlx5_esw_bridge_offloads *br_offloads;
1879 
1880 	ASSERT_RTNL();
1881 
1882 	br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
1883 	if (!br_offloads)
1884 		return ERR_PTR(-ENOMEM);
1885 
1886 	INIT_LIST_HEAD(&br_offloads->bridges);
1887 	xa_init(&br_offloads->ports);
1888 	br_offloads->esw = esw;
1889 	esw->br_offloads = br_offloads;
1890 
1891 	return br_offloads;
1892 }
1893 
1894 void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
1895 {
1896 	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
1897 
1898 	ASSERT_RTNL();
1899 
1900 	if (!br_offloads)
1901 		return;
1902 
1903 	mlx5_esw_bridge_flush(br_offloads);
1904 	WARN_ON(!xa_empty(&br_offloads->ports));
1905 
1906 	esw->br_offloads = NULL;
1907 	kvfree(br_offloads);
1908 }
1909