// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "lib/devcom.h"
#include "bridge.h"
#include "eswitch.h"
#include "bridge_priv.h"
#include "diag/bridge_tracepoint.h"

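/* MDB entries are kept in a per-bridge rhashtable keyed by (MAC address, VLAN
 * id) and, in parallel, on the bridge mdb_list used for traversal and flushing.
 */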
static const struct rhashtable_params mdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_mdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, ht_node),
	.automatic_shrinking = true,
};

int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->mdb_list);
	return rhashtable_init(&bridge->mdb_ht, &mdb_ht_params);
}

void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge)
{
	rhashtable_destroy(&bridge->mdb_ht);
}

static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_mdb_port_lookup(struct mlx5_esw_bridge_port *port,
				struct mlx5_esw_bridge_mdb_entry *entry)
{
	return xa_load(&entry->ports, mlx5_esw_bridge_port_key(port));
}

static int mlx5_esw_bridge_mdb_port_insert(struct mlx5_esw_bridge_port *port,
					   struct mlx5_esw_bridge_mdb_entry *entry)
{
	int err = xa_insert(&entry->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);

	if (!err)
		entry->num_ports++;
	return err;
}

static void mlx5_esw_bridge_mdb_port_remove(struct mlx5_esw_bridge_port *port,
					    struct mlx5_esw_bridge_mdb_entry *entry)
{
	xa_erase(&entry->ports, mlx5_esw_bridge_port_key(port));
	entry->num_ports--;
}

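/* Build the egress multicast rule for an MDB entry: match the destination MAC
 * (and VLAN, when the entry has one) and forward to the per-port multicast
 * flow table of every port currently attached to the entry.
 */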
static struct mlx5_flow_handle *
mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_mdb_entry *entry,
				struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL,
	};
	int num_dests = entry->num_ports, i = 0;
	struct mlx5_flow_destination *dests;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;
	unsigned long idx;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	dests = kvcalloc(num_dests, sizeof(*dests), GFP_KERNEL);
	if (!dests) {
		kvfree(rule_spec);
		return ERR_PTR(-ENOMEM);
	}

	xa_for_each(&entry->ports, idx, port) {
		dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dests[i].ft = port->mcast.ft;
		i++;
	}

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, entry->key.addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (entry->key.vid) {
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 entry->key.vid);
	}

	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, dests, num_dests);

	kvfree(dests);
	kvfree(rule_spec);
	return handle;
}

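/* Re-create the egress rule of the entry so that it reflects the current set
 * of attached ports. The previous rule, if any, is removed whether or not
 * creating the replacement succeeded, so on failure the entry is left without
 * an offloaded rule.
 */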
static int
mlx5_esw_bridge_port_mdb_offload(struct mlx5_esw_bridge_port *port,
				 struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_flow_handle *handle;

	handle = mlx5_esw_bridge_mdb_flow_create(port->esw_owner_vhca_id, entry, port->bridge);
	if (entry->egress_handle) {
		mlx5_del_flow_rules(entry->egress_handle);
		entry->egress_handle = NULL;
	}
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	entry->egress_handle = handle;
	return 0;
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_mdb_lookup(struct mlx5_esw_bridge *bridge,
			   const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge_mdb_key key = {};

	ether_addr_copy(key.addr, addr);
	key.vid = vid;
	return rhashtable_lookup_fast(&bridge->mdb_ht, &key, mdb_ht_params);
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_port_mdb_entry_init(struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	xa_init(&entry->ports);
	err = rhashtable_insert_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	if (err)
		goto err_ht_insert;

	list_add(&entry->list, &bridge->mdb_list);

	return entry;

err_ht_insert:
	xa_destroy(&entry->ports);
	kvfree(entry);
	return ERR_PTR(err);
}

static void mlx5_esw_bridge_port_mdb_entry_cleanup(struct mlx5_esw_bridge *bridge,
						   struct mlx5_esw_bridge_mdb_entry *entry)
{
	if (entry->egress_handle)
		mlx5_del_flow_rules(entry->egress_handle);
	list_del(&entry->list);
	rhashtable_remove_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	xa_destroy(&entry->ports);
	kvfree(entry);
}

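/* Attach a port to the MDB entry for (addr, vid), allocating the entry on
 * first use, and refresh the hardware rule to include the port. Only valid
 * while multicast offload is enabled on the bridge.
 */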
int mlx5_esw_bridge_port_mdb_attach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return -EOPNOTSUPP;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (entry) {
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid=%u,vport=%u)\n",
				 addr, vid, port->vport_num);
			return 0;
		}
	} else {
		entry = mlx5_esw_bridge_port_mdb_entry_init(port, addr, vid);
		if (IS_ERR(entry)) {
			err = PTR_ERR(entry);
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
				 addr, vid, port->vport_num, err);
			return err;
		}
	}

	err = mlx5_esw_bridge_mdb_port_insert(port, entry);
	if (err) {
		if (!entry->num_ports)
			mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry); /* new mdb entry */
		esw_warn(bridge->br_offloads->esw->dev,
			 "MDB attach failed to insert port (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);
		return err;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);

	trace_mlx5_esw_bridge_port_mdb_attach(dev, entry);
	return 0;
}

static void mlx5_esw_bridge_port_mdb_entry_detach(struct mlx5_esw_bridge_port *port,
						  struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	mlx5_esw_bridge_mdb_port_remove(port, entry);
	if (!entry->num_ports) {
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
		return;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 entry->key.addr, entry->key.vid, port->vport_num, err);
}

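/* Detach a port from the MDB entry for (addr, vid). The entry is freed once
 * its last port is detached; otherwise the hardware rule is refreshed with the
 * remaining ports.
 */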
void mlx5_esw_bridge_port_mdb_detach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				     const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (!entry) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not found (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	if (!mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not attached to the port (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	trace_mlx5_esw_bridge_port_mdb_detach(dev, entry);
	mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (entry->key.vid == vlan->vid && mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

static void mlx5_esw_bridge_port_mdb_flush(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
}
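
/* Per-port multicast flow table. Packets replicated by an MDB egress rule land
 * here and are either dropped, when they would be sent back through their
 * ingress port, or forwarded to the destination vport, with the VLAN tag
 * popped when a per-VLAN rule matches.
 */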
static int mlx5_esw_bridge_port_mcast_fts_init(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft;

	mcast_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE,
						MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
						esw);
	if (IS_ERR(mcast_ft))
		return PTR_ERR(mcast_ft);

	port->mcast.ft = mcast_ft;
	return 0;
}

static void mlx5_esw_bridge_port_mcast_fts_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.ft)
		mlx5_destroy_flow_table(port->mcast.ft);
	port->mcast.ft = NULL;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create filter flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					   struct mlx5_eswitch *esw,
					   struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge mcast table (err=%pe)\n",
			 vlan_proto, fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_qinq_fg_create(struct mlx5_eswitch *esw,
				     struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_fwd_fg_create(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create forward flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

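/* Flow groups of the per-port multicast table, in index order: source-port
 * filter (metadata match), 802.1Q VLAN, 802.1ad QinQ and catch-all forward.
 */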
static int mlx5_esw_bridge_port_mcast_fgs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_group *fwd_fg, *qinq_fg, *vlan_fg, *filter_fg;
	struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft = port->mcast.ft;
	int err;

	filter_fg = mlx5_esw_bridge_mcast_filter_fg_create(esw, mcast_ft);
	if (IS_ERR(filter_fg))
		return PTR_ERR(filter_fg);

	vlan_fg = mlx5_esw_bridge_mcast_vlan_fg_create(esw, mcast_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_mcast_qinq_fg_create(esw, mcast_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	fwd_fg = mlx5_esw_bridge_mcast_fwd_fg_create(esw, mcast_ft);
	if (IS_ERR(fwd_fg)) {
		err = PTR_ERR(fwd_fg);
		goto err_fwd_fg;
	}

	port->mcast.filter_fg = filter_fg;
	port->mcast.vlan_fg = vlan_fg;
	port->mcast.qinq_fg = qinq_fg;
	port->mcast.fwd_fg = fwd_fg;

	return 0;

err_fwd_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_group(filter_fg);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fgs_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.fwd_fg)
		mlx5_destroy_flow_group(port->mcast.fwd_fg);
	port->mcast.fwd_fg = NULL;
	if (port->mcast.qinq_fg)
		mlx5_destroy_flow_group(port->mcast.qinq_fg);
	port->mcast.qinq_fg = NULL;
	if (port->mcast.vlan_fg)
		mlx5_destroy_flow_group(port->mcast.vlan_fg);
	port->mcast.vlan_fg = NULL;
	if (port->mcast.filter_fg)
		mlx5_destroy_flow_group(port->mcast.filter_fg);
	port->mcast.filter_fg = NULL;
}

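/* Drop rule that filters out packets originating from the port itself, matched
 * by the source port metadata in reg_c_0, so multicast traffic is never sent
 * back through its ingress port.
 */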
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_flow_with_esw_create(struct mlx5_esw_bridge_port *port,
					   struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_DROP,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, port->vport_num));

	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, NULL, 0);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_mcast_flow_with_esw_create(port, port->bridge->br_offloads->esw);
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
	struct mlx5_eswitch *tmp, *peer_esw = NULL;
	struct mlx5_flow_handle *handle;

	if (!mlx5_devcom_for_each_peer_begin(devcom))
		return ERR_PTR(-ENODEV);

	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
			peer_esw = tmp;
			break;
		}
	}

	if (!peer_esw) {
		handle = ERR_PTR(-ENODEV);
		goto out;
	}

	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);

out:
	mlx5_devcom_for_each_peer_end(devcom);
	return handle;
}

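/* Per-VLAN forward rule: match the VLAN tag and VID, pop the tag using the
 * VLAN's packet reformat context and forward to the port's vport.
 */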
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    port->vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	flow_act.pkt_reformat = vlan->pkt_reformat_pop;

	if (vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid, vlan->vid);

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				    struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_handle *handle;

	if (!(port->bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	handle = mlx5_esw_bridge_mcast_vlan_flow_create(vlan_proto, port, vlan);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	vlan->mcast_handle = handle;
	return 0;
}

void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	if (vlan->mcast_handle)
		mlx5_del_flow_rules(vlan->mcast_handle);
	vlan->mcast_handle = NULL;
}

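/* Catch-all forward rule of the per-port multicast table: anything not dropped
 * by the filter rule and not handled by a per-VLAN rule is forwarded to the
 * port's vport unchanged.
 */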
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    port->vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

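/* Instantiate the flow handles of the per-port multicast table: the
 * source-port filter (created against the peer eswitch for peer ports), the
 * catch-all forward rule and one rule per configured VLAN.
 */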
static int mlx5_esw_bridge_port_mcast_fhs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_handle *filter_handle, *fwd_handle;
	struct mlx5_esw_bridge_vlan *vlan, *failed;
	unsigned long index;
	int err;

	filter_handle = (port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) ?
		mlx5_esw_bridge_mcast_filter_flow_peer_create(port) :
		mlx5_esw_bridge_mcast_filter_flow_create(port);
	if (IS_ERR(filter_handle))
		return PTR_ERR(filter_handle);

	fwd_handle = mlx5_esw_bridge_mcast_fwd_flow_create(port);
	if (IS_ERR(fwd_handle)) {
		err = PTR_ERR(fwd_handle);
		goto err_fwd;
	}

	xa_for_each(&port->vlans, index, vlan) {
		err = mlx5_esw_bridge_vlan_mcast_init(port->bridge->vlan_proto, port, vlan);
		if (err) {
			failed = vlan;
			goto err_vlan;
		}
	}

	port->mcast.filter_handle = filter_handle;
	port->mcast.fwd_handle = fwd_handle;

	return 0;

err_vlan:
	xa_for_each(&port->vlans, index, vlan) {
		if (vlan == failed)
			break;

		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
	}
	mlx5_del_flow_rules(fwd_handle);
err_fwd:
	mlx5_del_flow_rules(filter_handle);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fhs_cleanup(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);

	if (port->mcast.fwd_handle)
		mlx5_del_flow_rules(port->mcast.fwd_handle);
	port->mcast.fwd_handle = NULL;
	if (port->mcast.filter_handle)
		mlx5_del_flow_rules(port->mcast.filter_handle);
	port->mcast.filter_handle = NULL;
}

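/* Set up multicast offload for a port: create the per-port flow table, its
 * flow groups and its flow handles, in that order. No-op while multicast
 * offload is disabled on the bridge.
 */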
int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	err = mlx5_esw_bridge_port_mcast_fts_init(port, bridge);
	if (err)
		return err;

	err = mlx5_esw_bridge_port_mcast_fgs_init(port);
	if (err)
		goto err_fgs;

	err = mlx5_esw_bridge_port_mcast_fhs_init(port);
	if (err)
		goto err_fhs;
	return err;

err_fhs:
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
err_fgs:
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
	return err;
}

void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port)
{
	mlx5_esw_bridge_port_mdb_flush(port);
	mlx5_esw_bridge_port_mcast_fhs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
}

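/* Flow groups and rules of the shared ingress table that trap IGMP and MLD
 * control packets and redirect them to the skip table, bypassing the bridge
 * FDB processing.
 */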
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_igmp_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_protocol);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create IGMP flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mld_fg_create(struct mlx5_eswitch *esw,
				      struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	if (!(MLX5_CAP_GEN(esw->dev, flex_parser_protocols) & MLX5_FLEX_PROTO_ICMPV6)) {
		esw_warn(esw->dev,
			 "Can't create MLD flow group due to missing hardware ICMPv6 parsing support\n");
		return NULL;
	}

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters_3.icmpv6_type);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MLD flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static int
mlx5_esw_bridge_ingress_mcast_fgs_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_group *igmp_fg, *mld_fg;

	igmp_fg = mlx5_esw_bridge_ingress_igmp_fg_create(esw, ingress_ft);
	if (IS_ERR(igmp_fg))
		return PTR_ERR(igmp_fg);

	mld_fg = mlx5_esw_bridge_ingress_mld_fg_create(esw, ingress_ft);
	if (IS_ERR(mld_fg)) {
		mlx5_destroy_flow_group(igmp_fg);
		return PTR_ERR(mld_fg);
	}

	br_offloads->ingress_igmp_fg = igmp_fg;
	br_offloads->ingress_mld_fg = mld_fg;
	return 0;
}

static void
mlx5_esw_bridge_ingress_mcast_fgs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->ingress_mld_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_mld_fg);
	br_offloads->ingress_mld_fg = NULL;
	if (br_offloads->ingress_igmp_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_igmp_fg);
	br_offloads->ingress_igmp_fg = NULL;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_igmp_fh_create(struct mlx5_flow_table *ingress_ft,
				       struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 4);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_protocol, IPPROTO_IGMP);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_mld_fh_create(u8 type, struct mlx5_flow_table *ingress_ft,
				      struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 6);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, misc_parameters_3.icmpv6_type);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_3.icmpv6_type, type);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static int
mlx5_esw_bridge_ingress_mcast_fhs_create(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_handle *igmp_handle, *mld_query_handle, *mld_report_handle,
		*mld_done_handle;
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft,
		*skip_ft = br_offloads->skip_ft;
	int err;

	igmp_handle = mlx5_esw_bridge_ingress_igmp_fh_create(ingress_ft, skip_ft);
	if (IS_ERR(igmp_handle))
		return PTR_ERR(igmp_handle);

	if (br_offloads->ingress_mld_fg) {
		mld_query_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_QUERY,
									 ingress_ft,
									 skip_ft);
		if (IS_ERR(mld_query_handle)) {
			err = PTR_ERR(mld_query_handle);
			goto err_mld_query;
		}

		mld_report_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REPORT,
									  ingress_ft,
									  skip_ft);
		if (IS_ERR(mld_report_handle)) {
			err = PTR_ERR(mld_report_handle);
			goto err_mld_report;
		}

		mld_done_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REDUCTION,
									ingress_ft,
									skip_ft);
		if (IS_ERR(mld_done_handle)) {
			err = PTR_ERR(mld_done_handle);
			goto err_mld_done;
		}
	} else {
		mld_query_handle = NULL;
		mld_report_handle = NULL;
		mld_done_handle = NULL;
	}

	br_offloads->igmp_handle = igmp_handle;
	br_offloads->mld_query_handle = mld_query_handle;
	br_offloads->mld_report_handle = mld_report_handle;
	br_offloads->mld_done_handle = mld_done_handle;

	return 0;

err_mld_done:
	mlx5_del_flow_rules(mld_report_handle);
err_mld_report:
	mlx5_del_flow_rules(mld_query_handle);
err_mld_query:
	mlx5_del_flow_rules(igmp_handle);
	return err;
}

static void
mlx5_esw_bridge_ingress_mcast_fhs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->mld_done_handle)
		mlx5_del_flow_rules(br_offloads->mld_done_handle);
	br_offloads->mld_done_handle = NULL;
	if (br_offloads->mld_report_handle)
		mlx5_del_flow_rules(br_offloads->mld_report_handle);
	br_offloads->mld_report_handle = NULL;
	if (br_offloads->mld_query_handle)
		mlx5_del_flow_rules(br_offloads->mld_query_handle);
	br_offloads->mld_query_handle = NULL;
	if (br_offloads->igmp_handle)
		mlx5_del_flow_rules(br_offloads->igmp_handle);
	br_offloads->igmp_handle = NULL;
}

static int mlx5_esw_bridge_mcast_init(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port, *failed;
	unsigned long i;
	int err;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		err = mlx5_esw_bridge_port_mcast_init(port);
		if (err) {
			failed = port;
			goto err_port;
		}
	}
	return 0;

err_port:
	xa_for_each(&br_offloads->ports, i, port) {
		if (port == failed)
			break;
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
	return err;
}

static void mlx5_esw_bridge_mcast_cleanup(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port;
	unsigned long i;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
}

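/* The IGMP/MLD trap rules live in the ingress table shared by all bridges on
 * the eswitch, so they are created when the first bridge enables multicast and
 * removed only when the last bridge disables it.
 */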
static int mlx5_esw_bridge_mcast_global_enable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	int err;

	if (br_offloads->ingress_igmp_fg)
		return 0; /* already enabled by another bridge */

	err = mlx5_esw_bridge_ingress_mcast_fgs_init(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flow groups (err=%d)\n",
			 err);
		return err;
	}

	err = mlx5_esw_bridge_ingress_mcast_fhs_create(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flows (err=%d)\n",
			 err);
		goto err_fhs;
	}

	return 0;

err_fhs:
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
	return err;
}

static void mlx5_esw_bridge_mcast_global_disable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *br;

	list_for_each_entry(br, &br_offloads->bridges, list) {
		/* Ingress table is global, so only disable snooping when all
		 * bridges on esw have multicast disabled.
		 */
		if (br->flags & MLX5_ESW_BRIDGE_MCAST_FLAG)
			return;
	}

	mlx5_esw_bridge_ingress_mcast_fhs_cleanup(br_offloads);
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
}

int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge)
{
	int err;

	err = mlx5_esw_bridge_mcast_global_enable(bridge->br_offloads);
	if (err)
		return err;

	bridge->flags |= MLX5_ESW_BRIDGE_MCAST_FLAG;

	err = mlx5_esw_bridge_mcast_init(bridge);
	if (err) {
		esw_warn(bridge->br_offloads->esw->dev, "Failed to enable multicast (err=%d)\n",
			 err);
		bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
		mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
	}
	return err;
}

void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_mcast_cleanup(bridge);
	bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
	mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
}