// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "lib/devcom.h"
#include "bridge.h"
#include "eswitch.h"
#include "bridge_priv.h"

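/* MDB entries are hashed by {multicast MAC, VID}; each entry tracks its
 * member ports in an xarray keyed by mlx5_esw_bridge_port_key().
 */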
static const struct rhashtable_params mdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_mdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, ht_node),
	.automatic_shrinking = true,
};

int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->mdb_list);
	return rhashtable_init(&bridge->mdb_ht, &mdb_ht_params);
}

void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge)
{
	rhashtable_destroy(&bridge->mdb_ht);
}

static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_mdb_port_lookup(struct mlx5_esw_bridge_port *port,
				struct mlx5_esw_bridge_mdb_entry *entry)
{
	return xa_load(&entry->ports, mlx5_esw_bridge_port_key(port));
}

static int mlx5_esw_bridge_mdb_port_insert(struct mlx5_esw_bridge_port *port,
					   struct mlx5_esw_bridge_mdb_entry *entry)
{
	int err = xa_insert(&entry->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);

	if (!err)
		entry->num_ports++;
	return err;
}

static void mlx5_esw_bridge_mdb_port_remove(struct mlx5_esw_bridge_port *port,
					    struct mlx5_esw_bridge_mdb_entry *entry)
{
	xa_erase(&entry->ports, mlx5_esw_bridge_port_key(port));
	entry->num_ports--;
}

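/* Create a single egress rule that matches the group {DMAC, VID} and forwards
 * to the per-port multicast flow tables of all current member ports.
 * FLOW_ACT_IGNORE_FLOW_LEVEL allows forwarding to those tables regardless of
 * their flow level relative to the egress table.
 */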
static struct mlx5_flow_handle *
mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_mdb_entry *entry,
				struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL,
	};
	int num_dests = entry->num_ports, i = 0;
	struct mlx5_flow_destination *dests;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;
	unsigned long idx;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	dests = kvcalloc(num_dests, sizeof(*dests), GFP_KERNEL);
	if (!dests) {
		kvfree(rule_spec);
		return ERR_PTR(-ENOMEM);
	}

	xa_for_each(&entry->ports, idx, port) {
		dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dests[i].ft = port->mcast.ft;
		i++;
	}

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, entry->key.addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (entry->key.vid) {
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 entry->key.vid);
	}

	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, dests, num_dests);

	kvfree(dests);
	kvfree(rule_spec);
	return handle;
}

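/* Replace the egress rule of the entry with one forwarding to the current set
 * of member ports. The new rule is created before the old one is removed to
 * narrow the window where the group has no rule; on failure the entry is left
 * with no handle rather than a dangling pointer to a deleted rule.
 */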
static int
mlx5_esw_bridge_port_mdb_offload(struct mlx5_esw_bridge_port *port,
				 struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_flow_handle *handle;

	handle = mlx5_esw_bridge_mdb_flow_create(port->esw_owner_vhca_id, entry, port->bridge);
	if (entry->egress_handle) {
		mlx5_del_flow_rules(entry->egress_handle);
		entry->egress_handle = NULL;
	}
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	entry->egress_handle = handle;
	return 0;
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_mdb_lookup(struct mlx5_esw_bridge *bridge,
			   const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge_mdb_key key = {};

	ether_addr_copy(key.addr, addr);
	key.vid = vid;
	return rhashtable_lookup_fast(&bridge->mdb_ht, &key, mdb_ht_params);
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_port_mdb_entry_init(struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	xa_init(&entry->ports);
	err = rhashtable_insert_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	if (err)
		goto err_ht_insert;

	list_add(&entry->list, &bridge->mdb_list);

	return entry;

err_ht_insert:
	xa_destroy(&entry->ports);
	kvfree(entry);
	return ERR_PTR(err);
}

static void mlx5_esw_bridge_port_mdb_entry_cleanup(struct mlx5_esw_bridge *bridge,
						   struct mlx5_esw_bridge_mdb_entry *entry)
{
	if (entry->egress_handle)
		mlx5_del_flow_rules(entry->egress_handle);
	list_del(&entry->list);
	rhashtable_remove_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	xa_destroy(&entry->ports);
	kvfree(entry);
}

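/* Attach the port to the MDB entry for {addr, vid}, allocating the entry on
 * first use. An offload failure is only logged since the entry remains valid
 * for the other ports that use it.
 */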
int mlx5_esw_bridge_port_mdb_attach(struct mlx5_esw_bridge_port *port, const unsigned char *addr,
				    u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return -EOPNOTSUPP;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (entry) {
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid=%u,vport=%u)\n",
				 addr, vid, port->vport_num);
			return 0;
		}
	} else {
		entry = mlx5_esw_bridge_port_mdb_entry_init(port, addr, vid);
		if (IS_ERR(entry)) {
			err = PTR_ERR(entry);
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
				 addr, vid, port->vport_num, err);
			return err;
		}
	}

	err = mlx5_esw_bridge_mdb_port_insert(port, entry);
	if (err) {
		if (!entry->num_ports)
			mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry); /* new mdb entry */
		esw_warn(bridge->br_offloads->esw->dev,
			 "MDB attach failed to insert port (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);
		return err;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);
	return 0;
}

static void mlx5_esw_bridge_port_mdb_entry_detach(struct mlx5_esw_bridge_port *port,
						  struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	mlx5_esw_bridge_mdb_port_remove(port, entry);
	if (!entry->num_ports) {
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
		return;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u)\n",
			 entry->key.addr, entry->key.vid, port->vport_num);
}

void mlx5_esw_bridge_port_mdb_detach(struct mlx5_esw_bridge_port *port, const unsigned char *addr,
				     u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (!entry) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not found (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	if (!mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not attached to the port (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (entry->key.vid == vlan->vid && mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

static void mlx5_esw_bridge_port_mdb_flush(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
}
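
/* Each port owns a dedicated multicast flow table. Packets replicated by the
 * shared egress rules land here to get source-port filtering and VLAN
 * handling before being forwarded to the port vport.
 */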
static int mlx5_esw_bridge_port_mcast_fts_init(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft;

	mcast_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE,
						MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
						esw);
	if (IS_ERR(mcast_ft))
		return PTR_ERR(mcast_ft);

	port->mcast.ft = mcast_ft;
	return 0;
}

static void mlx5_esw_bridge_port_mcast_fts_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.ft)
		mlx5_destroy_flow_table(port->mcast.ft);
	port->mcast.ft = NULL;
}

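/* The filter group matches on the source vport metadata in reg_c_0 and hosts
 * the drop rule that keeps multicast traffic from looping back to the port it
 * ingressed from.
 */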
static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create filter flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					   struct mlx5_eswitch *esw,
					   struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge mcast table (err=%pe)\n",
			 vlan_proto, fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_qinq_fg_create(struct mlx5_eswitch *esw,
				     struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_fwd_fg_create(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create forward flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static int mlx5_esw_bridge_port_mcast_fgs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_group *fwd_fg, *qinq_fg, *vlan_fg, *filter_fg;
	struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft = port->mcast.ft;
	int err;

	filter_fg = mlx5_esw_bridge_mcast_filter_fg_create(esw, mcast_ft);
	if (IS_ERR(filter_fg))
		return PTR_ERR(filter_fg);

	vlan_fg = mlx5_esw_bridge_mcast_vlan_fg_create(esw, mcast_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_mcast_qinq_fg_create(esw, mcast_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	fwd_fg = mlx5_esw_bridge_mcast_fwd_fg_create(esw, mcast_ft);
	if (IS_ERR(fwd_fg)) {
		err = PTR_ERR(fwd_fg);
		goto err_fwd_fg;
	}

	port->mcast.filter_fg = filter_fg;
	port->mcast.vlan_fg = vlan_fg;
	port->mcast.qinq_fg = qinq_fg;
	port->mcast.fwd_fg = fwd_fg;

	return 0;

err_fwd_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_group(filter_fg);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fgs_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.fwd_fg)
		mlx5_destroy_flow_group(port->mcast.fwd_fg);
	port->mcast.fwd_fg = NULL;
	if (port->mcast.qinq_fg)
		mlx5_destroy_flow_group(port->mcast.qinq_fg);
	port->mcast.qinq_fg = NULL;
	if (port->mcast.vlan_fg)
		mlx5_destroy_flow_group(port->mcast.vlan_fg);
	port->mcast.vlan_fg = NULL;
	if (port->mcast.filter_fg)
		mlx5_destroy_flow_group(port->mcast.filter_fg);
	port->mcast.filter_fg = NULL;
}

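/* Drop rule matching the source vport metadata of the given eswitch, which
 * prevents multicast traffic from being replicated back to its ingress port.
 */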
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_flow_with_esw_create(struct mlx5_esw_bridge_port *port,
					   struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_DROP,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, port->vport_num));

	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, NULL, 0);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_mcast_flow_with_esw_create(port, port->bridge->br_offloads->esw);
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
	struct mlx5_flow_handle *handle;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return ERR_PTR(-ENODEV);

	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);

	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return handle;
}

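/* Per-VLAN rule in the port multicast table: match the VLAN tag and VID, pop
 * the tag and forward the packet to the port vport.
 */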
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    port->vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	flow_act.pkt_reformat = vlan->pkt_reformat_pop;

	if (vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid, vlan->vid);

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				    struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_handle *handle;

	if (!(port->bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	handle = mlx5_esw_bridge_mcast_vlan_flow_create(vlan_proto, port, vlan);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	vlan->mcast_handle = handle;
	return 0;
}

void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	if (vlan->mcast_handle)
		mlx5_del_flow_rules(vlan->mcast_handle);
	vlan->mcast_handle = NULL;
}

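/* Catch-all rule of the port multicast table: forward any packet that was not
 * dropped by the filter rule or matched by a VLAN rule to the port vport.
 */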
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    port->vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

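/* Instantiate the port flow handles: the source-port filter rule (matching on
 * the peer eswitch for peer ports), the catch-all forward rule and one
 * VLAN-pop rule per configured VLAN.
 */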
static int mlx5_esw_bridge_port_mcast_fhs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_handle *filter_handle, *fwd_handle;
	struct mlx5_esw_bridge_vlan *vlan, *failed;
	unsigned long index;
	int err;

	filter_handle = (port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) ?
		mlx5_esw_bridge_mcast_filter_flow_peer_create(port) :
		mlx5_esw_bridge_mcast_filter_flow_create(port);
	if (IS_ERR(filter_handle))
		return PTR_ERR(filter_handle);

	fwd_handle = mlx5_esw_bridge_mcast_fwd_flow_create(port);
	if (IS_ERR(fwd_handle)) {
		err = PTR_ERR(fwd_handle);
		goto err_fwd;
	}

	xa_for_each(&port->vlans, index, vlan) {
		err = mlx5_esw_bridge_vlan_mcast_init(port->bridge->vlan_proto, port, vlan);
		if (err) {
			failed = vlan;
			goto err_vlan;
		}
	}

	port->mcast.filter_handle = filter_handle;
	port->mcast.fwd_handle = fwd_handle;

	return 0;

err_vlan:
	xa_for_each(&port->vlans, index, vlan) {
		if (vlan == failed)
			break;

		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
	}
	mlx5_del_flow_rules(fwd_handle);
err_fwd:
	mlx5_del_flow_rules(filter_handle);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fhs_cleanup(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);

	if (port->mcast.fwd_handle)
		mlx5_del_flow_rules(port->mcast.fwd_handle);
	port->mcast.fwd_handle = NULL;
	if (port->mcast.filter_handle)
		mlx5_del_flow_rules(port->mcast.filter_handle);
	port->mcast.filter_handle = NULL;
}

int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	err = mlx5_esw_bridge_port_mcast_fts_init(port, bridge);
	if (err)
		return err;

	err = mlx5_esw_bridge_port_mcast_fgs_init(port);
	if (err)
		goto err_fgs;

	err = mlx5_esw_bridge_port_mcast_fhs_init(port);
	if (err)
		goto err_fhs;
	return err;

err_fhs:
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
err_fgs:
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
	return err;
}

void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port)
{
	mlx5_esw_bridge_port_mdb_flush(port);
	mlx5_esw_bridge_port_mcast_fhs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
}

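/* Global ingress flow groups (and, below, their rules) that trap IGMP and MLD
 * control packets to the skip table, keeping multicast control traffic out of
 * the offloaded bridge pipeline so the software bridge can process it.
 */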
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_igmp_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_protocol);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create IGMP flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mld_fg_create(struct mlx5_eswitch *esw,
				      struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	if (!(MLX5_CAP_GEN(esw->dev, flex_parser_protocols) & MLX5_FLEX_PROTO_ICMPV6)) {
		esw_warn(esw->dev,
			 "Can't create MLD flow group due to missing hardware ICMPv6 parsing support\n");
		return NULL;
	}

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters_3.icmpv6_type);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MLD flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static int
mlx5_esw_bridge_ingress_mcast_fgs_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_group *igmp_fg, *mld_fg;

	igmp_fg = mlx5_esw_bridge_ingress_igmp_fg_create(esw, ingress_ft);
	if (IS_ERR(igmp_fg))
		return PTR_ERR(igmp_fg);

	mld_fg = mlx5_esw_bridge_ingress_mld_fg_create(esw, ingress_ft);
	if (IS_ERR(mld_fg)) {
		mlx5_destroy_flow_group(igmp_fg);
		return PTR_ERR(mld_fg);
	}

	br_offloads->ingress_igmp_fg = igmp_fg;
	br_offloads->ingress_mld_fg = mld_fg;
	return 0;
}

static void
mlx5_esw_bridge_ingress_mcast_fgs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->ingress_mld_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_mld_fg);
	br_offloads->ingress_mld_fg = NULL;
	if (br_offloads->ingress_igmp_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_igmp_fg);
	br_offloads->ingress_igmp_fg = NULL;
}

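/* Redirect IPv4 IGMP packets (and, in the MLD variant below, IPv6 ICMPv6
 * packets of a given type) to the skip table instead of bridging them in
 * hardware.
 */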
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_igmp_fh_create(struct mlx5_flow_table *ingress_ft,
				       struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 4);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_protocol, IPPROTO_IGMP);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_mld_fh_create(u8 type, struct mlx5_flow_table *ingress_ft,
				      struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 6);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, misc_parameters_3.icmpv6_type);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_3.icmpv6_type, type);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static int
mlx5_esw_bridge_ingress_mcast_fhs_create(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_handle *igmp_handle, *mld_query_handle, *mld_report_handle,
		*mld_done_handle;
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft,
		*skip_ft = br_offloads->skip_ft;
	int err;

	igmp_handle = mlx5_esw_bridge_ingress_igmp_fh_create(ingress_ft, skip_ft);
	if (IS_ERR(igmp_handle))
		return PTR_ERR(igmp_handle);

	if (br_offloads->ingress_mld_fg) {
		mld_query_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_QUERY,
									 ingress_ft,
									 skip_ft);
		if (IS_ERR(mld_query_handle)) {
			err = PTR_ERR(mld_query_handle);
			goto err_mld_query;
		}

		mld_report_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REPORT,
									  ingress_ft,
									  skip_ft);
		if (IS_ERR(mld_report_handle)) {
			err = PTR_ERR(mld_report_handle);
			goto err_mld_report;
		}

		mld_done_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REDUCTION,
									ingress_ft,
									skip_ft);
		if (IS_ERR(mld_done_handle)) {
			err = PTR_ERR(mld_done_handle);
			goto err_mld_done;
		}
	} else {
		mld_query_handle = NULL;
		mld_report_handle = NULL;
		mld_done_handle = NULL;
	}

	br_offloads->igmp_handle = igmp_handle;
	br_offloads->mld_query_handle = mld_query_handle;
	br_offloads->mld_report_handle = mld_report_handle;
	br_offloads->mld_done_handle = mld_done_handle;

	return 0;

err_mld_done:
	mlx5_del_flow_rules(mld_report_handle);
err_mld_report:
	mlx5_del_flow_rules(mld_query_handle);
err_mld_query:
	mlx5_del_flow_rules(igmp_handle);
	return err;
}

static void
mlx5_esw_bridge_ingress_mcast_fhs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->mld_done_handle)
		mlx5_del_flow_rules(br_offloads->mld_done_handle);
	br_offloads->mld_done_handle = NULL;
	if (br_offloads->mld_report_handle)
		mlx5_del_flow_rules(br_offloads->mld_report_handle);
	br_offloads->mld_report_handle = NULL;
	if (br_offloads->mld_query_handle)
		mlx5_del_flow_rules(br_offloads->mld_query_handle);
	br_offloads->mld_query_handle = NULL;
	if (br_offloads->igmp_handle)
		mlx5_del_flow_rules(br_offloads->igmp_handle);
	br_offloads->igmp_handle = NULL;
}

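/* Enable per-port multicast offload on every port attached to the bridge,
 * unwinding already-initialized ports on failure.
 */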
static int mlx5_esw_bridge_mcast_init(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port, *failed;
	unsigned long i;
	int err;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		err = mlx5_esw_bridge_port_mcast_init(port);
		if (err) {
			failed = port;
			goto err_port;
		}
	}
	return 0;

err_port:
	xa_for_each(&br_offloads->ports, i, port) {
		if (port == failed)
			break;
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
	return err;
}

static void mlx5_esw_bridge_mcast_cleanup(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port;
	unsigned long i;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
}

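/* The IGMP/MLD ingress groups and rules are shared by all bridges on the
 * eswitch, so they are created by the first bridge that enables multicast and
 * destroyed only when the last one disables it.
 */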
static int mlx5_esw_bridge_mcast_global_enable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	int err;

	if (br_offloads->ingress_igmp_fg)
		return 0; /* already enabled by another bridge */

	err = mlx5_esw_bridge_ingress_mcast_fgs_init(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flow groups (err=%d)\n",
			 err);
		return err;
	}

	err = mlx5_esw_bridge_ingress_mcast_fhs_create(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flows (err=%d)\n",
			 err);
		goto err_fhs;
	}

	return 0;

err_fhs:
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
	return err;
}

static void mlx5_esw_bridge_mcast_global_disable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *br;

	list_for_each_entry(br, &br_offloads->bridges, list) {
		/* Ingress table is global, so only disable snooping when all
		 * bridges on esw have multicast disabled.
		 */
		if (br->flags & MLX5_ESW_BRIDGE_MCAST_FLAG)
			return;
	}

	mlx5_esw_bridge_ingress_mcast_fhs_cleanup(br_offloads);
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
}

int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge)
{
	int err;

	err = mlx5_esw_bridge_mcast_global_enable(bridge->br_offloads);
	if (err)
		return err;

	bridge->flags |= MLX5_ESW_BRIDGE_MCAST_FLAG;

	err = mlx5_esw_bridge_mcast_init(bridge);
	if (err) {
		esw_warn(bridge->br_offloads->esw->dev, "Failed to enable multicast (err=%d)\n",
			 err);
		bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
		mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
	}
	return err;
}

void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_mcast_cleanup(bridge);
	bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
	mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
}