// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "lib/devcom.h"
#include "bridge.h"
#include "eswitch.h"
#include "bridge_priv.h"
#include "diag/bridge_tracepoint.h"

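/* MDB entries are hashed by {multicast MAC, VID} (struct mlx5_esw_bridge_mdb_key). */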
static const struct rhashtable_params mdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_mdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, ht_node),
	.automatic_shrinking = true,
};

int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->mdb_list);
	return rhashtable_init(&bridge->mdb_ht, &mdb_ht_params);
}

void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge)
{
	rhashtable_destroy(&bridge->mdb_ht);
}

static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_mdb_port_lookup(struct mlx5_esw_bridge_port *port,
				struct mlx5_esw_bridge_mdb_entry *entry)
{
	return xa_load(&entry->ports, mlx5_esw_bridge_port_key(port));
}

static int mlx5_esw_bridge_mdb_port_insert(struct mlx5_esw_bridge_port *port,
					   struct mlx5_esw_bridge_mdb_entry *entry)
{
	int err = xa_insert(&entry->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);

	if (!err)
		entry->num_ports++;
	return err;
}

static void mlx5_esw_bridge_mdb_port_remove(struct mlx5_esw_bridge_port *port,
					    struct mlx5_esw_bridge_mdb_entry *entry)
{
	xa_erase(&entry->ports, mlx5_esw_bridge_port_key(port));
	entry->num_ports--;
}

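/* Build a single egress rule that matches the entry's multicast MAC (and VLAN,
 * when the entry has a VID) and replicates matching packets to the per-port
 * multicast table of every port currently attached to the MDB entry.
 */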
static struct mlx5_flow_handle *
mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_mdb_entry *entry,
				struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL,
	};
	int num_dests = entry->num_ports, i = 0;
	struct mlx5_flow_destination *dests;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;
	unsigned long idx;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	dests = kvcalloc(num_dests, sizeof(*dests), GFP_KERNEL);
	if (!dests) {
		kvfree(rule_spec);
		return ERR_PTR(-ENOMEM);
	}

	xa_for_each(&entry->ports, idx, port) {
		dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dests[i].ft = port->mcast.ft;
		i++;
	}

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, entry->key.addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (entry->key.vid) {
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 entry->key.vid);
	}

	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, dests, num_dests);

	kvfree(dests);
	kvfree(rule_spec);
	return handle;
}

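/* Re-create the MDB entry's egress replication rule to match the current set
 * of member ports. The previous rule, if any, is removed even when creating
 * the new one fails, so the entry never keeps forwarding to a stale port list.
 */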
static int
mlx5_esw_bridge_port_mdb_offload(struct mlx5_esw_bridge_port *port,
				 struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_flow_handle *handle;

	handle = mlx5_esw_bridge_mdb_flow_create(port->esw_owner_vhca_id, entry, port->bridge);
	if (entry->egress_handle) {
		mlx5_del_flow_rules(entry->egress_handle);
		entry->egress_handle = NULL;
	}
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	entry->egress_handle = handle;
	return 0;
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_mdb_lookup(struct mlx5_esw_bridge *bridge,
			   const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge_mdb_key key = {};

	ether_addr_copy(key.addr, addr);
	key.vid = vid;
	return rhashtable_lookup_fast(&bridge->mdb_ht, &key, mdb_ht_params);
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_port_mdb_entry_init(struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	xa_init(&entry->ports);
	err = rhashtable_insert_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	if (err)
		goto err_ht_insert;

	list_add(&entry->list, &bridge->mdb_list);

	return entry;

err_ht_insert:
	xa_destroy(&entry->ports);
	kvfree(entry);
	return ERR_PTR(err);
}

static void mlx5_esw_bridge_port_mdb_entry_cleanup(struct mlx5_esw_bridge *bridge,
						   struct mlx5_esw_bridge_mdb_entry *entry)
{
	if (entry->egress_handle)
		mlx5_del_flow_rules(entry->egress_handle);
	list_del(&entry->list);
	rhashtable_remove_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	xa_destroy(&entry->ports);
	kvfree(entry);
}

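/* Attach a port to the MDB entry for (addr, vid), creating the entry on first
 * use, and refresh the hardware replication rule. Offload failures are only
 * logged because the entry may still be in use by other ports.
 */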
int mlx5_esw_bridge_port_mdb_attach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return -EOPNOTSUPP;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (entry) {
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid=%u,vport=%u)\n",
				 addr, vid, port->vport_num);
			return 0;
		}
	} else {
		entry = mlx5_esw_bridge_port_mdb_entry_init(port, addr, vid);
		if (IS_ERR(entry)) {
			err = PTR_ERR(entry);
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
				 addr, vid, port->vport_num, err);
			return err;
		}
	}

	err = mlx5_esw_bridge_mdb_port_insert(port, entry);
	if (err) {
		if (!entry->num_ports)
			mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry); /* new mdb entry */
		esw_warn(bridge->br_offloads->esw->dev,
			 "MDB attach failed to insert port (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);
		return err;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);

	trace_mlx5_esw_bridge_port_mdb_attach(dev, entry);
	return 0;
}

static void mlx5_esw_bridge_port_mdb_entry_detach(struct mlx5_esw_bridge_port *port,
						  struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	mlx5_esw_bridge_mdb_port_remove(port, entry);
	if (!entry->num_ports) {
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
		return;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 entry->key.addr, entry->key.vid, port->vport_num, err);
}

void mlx5_esw_bridge_port_mdb_detach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				     const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (!entry) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not found (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	if (!mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not attached to the port (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	trace_mlx5_esw_bridge_port_mdb_detach(dev, entry);
	mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (entry->key.vid == vlan->vid && mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

static void mlx5_esw_bridge_port_mdb_flush(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
}

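/* Per-port multicast flow table: MDB egress rules replicate packets into these
 * tables, where the port's filter, per-VLAN and forward rules decide final
 * delivery to the vport.
 */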
static int mlx5_esw_bridge_port_mcast_fts_init(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft;

	mcast_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE,
						MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
						esw);
	if (IS_ERR(mcast_ft))
		return PTR_ERR(mcast_ft);

	port->mcast.ft = mcast_ft;
	return 0;
}

static void mlx5_esw_bridge_port_mcast_fts_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.ft)
		mlx5_destroy_flow_table(port->mcast.ft);
	port->mcast.ft = NULL;
}

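/* The filter group matches on source vport metadata (reg_c_0) and holds the
 * drop rule that keeps multicast traffic from being replicated back to the
 * port it arrived on.
 */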
static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create filter flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					   struct mlx5_eswitch *esw,
					   struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge mcast table (err=%pe)\n",
			 vlan_proto, fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_qinq_fg_create(struct mlx5_eswitch *esw,
				     struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_fwd_fg_create(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create forward flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static int mlx5_esw_bridge_port_mcast_fgs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_group *fwd_fg, *qinq_fg, *vlan_fg, *filter_fg;
	struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft = port->mcast.ft;
	int err;

	filter_fg = mlx5_esw_bridge_mcast_filter_fg_create(esw, mcast_ft);
	if (IS_ERR(filter_fg))
		return PTR_ERR(filter_fg);

	vlan_fg = mlx5_esw_bridge_mcast_vlan_fg_create(esw, mcast_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_mcast_qinq_fg_create(esw, mcast_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	fwd_fg = mlx5_esw_bridge_mcast_fwd_fg_create(esw, mcast_ft);
	if (IS_ERR(fwd_fg)) {
		err = PTR_ERR(fwd_fg);
		goto err_fwd_fg;
	}

	port->mcast.filter_fg = filter_fg;
	port->mcast.vlan_fg = vlan_fg;
	port->mcast.qinq_fg = qinq_fg;
	port->mcast.fwd_fg = fwd_fg;

	return 0;

err_fwd_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_group(filter_fg);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fgs_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.fwd_fg)
		mlx5_destroy_flow_group(port->mcast.fwd_fg);
	port->mcast.fwd_fg = NULL;
	if (port->mcast.qinq_fg)
		mlx5_destroy_flow_group(port->mcast.qinq_fg);
	port->mcast.qinq_fg = NULL;
	if (port->mcast.vlan_fg)
		mlx5_destroy_flow_group(port->mcast.vlan_fg);
	port->mcast.vlan_fg = NULL;
	if (port->mcast.filter_fg)
		mlx5_destroy_flow_group(port->mcast.filter_fg);
	port->mcast.filter_fg = NULL;
}

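/* Install a drop rule in the port's multicast table that matches packets whose
 * source vport metadata identifies this port on the given eswitch, so a port
 * never receives copies of its own multicast traffic.
 */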
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_flow_with_esw_create(struct mlx5_esw_bridge_port *port,
					   struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_DROP,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, port->vport_num));

	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, NULL, 0);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_mcast_flow_with_esw_create(port, port->bridge->br_offloads->esw);
}

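/* For peer ports the vport is owned by another eswitch, so walk the devcom
 * peer list to find the owning eswitch and build the filter rule with its
 * vport metadata.
 */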
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
	struct mlx5_eswitch *tmp, *peer_esw = NULL;
	struct mlx5_flow_handle *handle;
	int i;

	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		return ERR_PTR(-ENODEV);

	mlx5_devcom_for_each_peer_entry(devcom,
					MLX5_DEVCOM_ESW_OFFLOADS,
					tmp, i) {
		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
			peer_esw = tmp;
			break;
		}
	}
	if (!peer_esw) {
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		return ERR_PTR(-ENODEV);
	}

	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);

	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return handle;
}

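/* Forward VLAN-tagged multicast to the port's vport: match the configured VLAN
 * protocol and VID, pop the tag using the VLAN's pop reformat action and
 * deliver the packet to the vport.
 */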
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    port->vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	flow_act.pkt_reformat = vlan->pkt_reformat_pop;

	if (vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid, vlan->vid);

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				    struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_handle *handle;

	if (!(port->bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	handle = mlx5_esw_bridge_mcast_vlan_flow_create(vlan_proto, port, vlan);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	vlan->mcast_handle = handle;
	return 0;
}

void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	if (vlan->mcast_handle)
		mlx5_del_flow_rules(vlan->mcast_handle);
	vlan->mcast_handle = NULL;
}

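/* Lowest-priority catch-all rule in the port's multicast table: forward any
 * packet not handled by the filter and VLAN groups to the port's vport as-is.
 */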
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    port->vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

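/* Populate the port's multicast table: the source-port filter rule, one rule
 * per offloaded VLAN and the catch-all forward rule.
 */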
static int mlx5_esw_bridge_port_mcast_fhs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_handle *filter_handle, *fwd_handle;
	struct mlx5_esw_bridge_vlan *vlan, *failed;
	unsigned long index;
	int err;

	filter_handle = (port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) ?
		mlx5_esw_bridge_mcast_filter_flow_peer_create(port) :
		mlx5_esw_bridge_mcast_filter_flow_create(port);
	if (IS_ERR(filter_handle))
		return PTR_ERR(filter_handle);

	fwd_handle = mlx5_esw_bridge_mcast_fwd_flow_create(port);
	if (IS_ERR(fwd_handle)) {
		err = PTR_ERR(fwd_handle);
		goto err_fwd;
	}

	xa_for_each(&port->vlans, index, vlan) {
		err = mlx5_esw_bridge_vlan_mcast_init(port->bridge->vlan_proto, port, vlan);
		if (err) {
			failed = vlan;
			goto err_vlan;
		}
	}

	port->mcast.filter_handle = filter_handle;
	port->mcast.fwd_handle = fwd_handle;

	return 0;

err_vlan:
	xa_for_each(&port->vlans, index, vlan) {
		if (vlan == failed)
			break;

		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
	}
	mlx5_del_flow_rules(fwd_handle);
err_fwd:
	mlx5_del_flow_rules(filter_handle);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fhs_cleanup(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);

	if (port->mcast.fwd_handle)
		mlx5_del_flow_rules(port->mcast.fwd_handle);
	port->mcast.fwd_handle = NULL;
	if (port->mcast.filter_handle)
		mlx5_del_flow_rules(port->mcast.filter_handle);
	port->mcast.filter_handle = NULL;
}

int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	err = mlx5_esw_bridge_port_mcast_fts_init(port, bridge);
	if (err)
		return err;

	err = mlx5_esw_bridge_port_mcast_fgs_init(port);
	if (err)
		goto err_fgs;

	err = mlx5_esw_bridge_port_mcast_fhs_init(port);
	if (err)
		goto err_fhs;
	return err;

err_fhs:
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
err_fgs:
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
	return err;
}

void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port)
{
	mlx5_esw_bridge_port_mdb_flush(port);
	mlx5_esw_bridge_port_mcast_fhs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_igmp_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_protocol);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create IGMP flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mld_fg_create(struct mlx5_eswitch *esw,
				      struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	if (!(MLX5_CAP_GEN(esw->dev, flex_parser_protocols) & MLX5_FLEX_PROTO_ICMPV6)) {
		esw_warn(esw->dev,
			 "Can't create MLD flow group due to missing hardware ICMPv6 parsing support\n");
		return NULL;
	}

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters_3.icmpv6_type);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MLD flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static int
mlx5_esw_bridge_ingress_mcast_fgs_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_group *igmp_fg, *mld_fg;

	igmp_fg = mlx5_esw_bridge_ingress_igmp_fg_create(esw, ingress_ft);
	if (IS_ERR(igmp_fg))
		return PTR_ERR(igmp_fg);

	mld_fg = mlx5_esw_bridge_ingress_mld_fg_create(esw, ingress_ft);
	if (IS_ERR(mld_fg)) {
		mlx5_destroy_flow_group(igmp_fg);
		return PTR_ERR(mld_fg);
	}

	br_offloads->ingress_igmp_fg = igmp_fg;
	br_offloads->ingress_mld_fg = mld_fg;
	return 0;
}

static void
mlx5_esw_bridge_ingress_mcast_fgs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->ingress_mld_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_mld_fg);
	br_offloads->ingress_mld_fg = NULL;
	if (br_offloads->ingress_igmp_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_igmp_fg);
	br_offloads->ingress_igmp_fg = NULL;
}

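/* Redirect IPv4 IGMP packets from the bridge ingress table to the skip table
 * so snooping control traffic is not bridged in hardware and is left to the
 * software bridge.
 */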
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_igmp_fh_create(struct mlx5_flow_table *ingress_ft,
				       struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 4);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_protocol, IPPROTO_IGMP);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_mld_fh_create(u8 type, struct mlx5_flow_table *ingress_ft,
				      struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 6);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, misc_parameters_3.icmpv6_type);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_3.icmpv6_type, type);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

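/* Install the shared IGMP redirect rule and, when the MLD flow group exists
 * (i.e. the hardware can parse ICMPv6), the MLD query/report/done redirect
 * rules in the global bridge ingress table.
 */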
static int
mlx5_esw_bridge_ingress_mcast_fhs_create(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_handle *igmp_handle, *mld_query_handle, *mld_report_handle,
		*mld_done_handle;
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft,
		*skip_ft = br_offloads->skip_ft;
	int err;

	igmp_handle = mlx5_esw_bridge_ingress_igmp_fh_create(ingress_ft, skip_ft);
	if (IS_ERR(igmp_handle))
		return PTR_ERR(igmp_handle);

	if (br_offloads->ingress_mld_fg) {
		mld_query_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_QUERY,
									 ingress_ft,
									 skip_ft);
		if (IS_ERR(mld_query_handle)) {
			err = PTR_ERR(mld_query_handle);
			goto err_mld_query;
		}

		mld_report_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REPORT,
									  ingress_ft,
									  skip_ft);
		if (IS_ERR(mld_report_handle)) {
			err = PTR_ERR(mld_report_handle);
			goto err_mld_report;
		}

		mld_done_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REDUCTION,
									ingress_ft,
									skip_ft);
		if (IS_ERR(mld_done_handle)) {
			err = PTR_ERR(mld_done_handle);
			goto err_mld_done;
		}
	} else {
		mld_query_handle = NULL;
		mld_report_handle = NULL;
		mld_done_handle = NULL;
	}

	br_offloads->igmp_handle = igmp_handle;
	br_offloads->mld_query_handle = mld_query_handle;
	br_offloads->mld_report_handle = mld_report_handle;
	br_offloads->mld_done_handle = mld_done_handle;

	return 0;

err_mld_done:
	mlx5_del_flow_rules(mld_report_handle);
err_mld_report:
	mlx5_del_flow_rules(mld_query_handle);
err_mld_query:
	mlx5_del_flow_rules(igmp_handle);
	return err;
}

static void
mlx5_esw_bridge_ingress_mcast_fhs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->mld_done_handle)
		mlx5_del_flow_rules(br_offloads->mld_done_handle);
	br_offloads->mld_done_handle = NULL;
	if (br_offloads->mld_report_handle)
		mlx5_del_flow_rules(br_offloads->mld_report_handle);
	br_offloads->mld_report_handle = NULL;
	if (br_offloads->mld_query_handle)
		mlx5_del_flow_rules(br_offloads->mld_query_handle);
	br_offloads->mld_query_handle = NULL;
	if (br_offloads->igmp_handle)
		mlx5_del_flow_rules(br_offloads->igmp_handle);
	br_offloads->igmp_handle = NULL;
}

static int mlx5_esw_bridge_mcast_init(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port, *failed;
	unsigned long i;
	int err;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		err = mlx5_esw_bridge_port_mcast_init(port);
		if (err) {
			failed = port;
			goto err_port;
		}
	}
	return 0;

err_port:
	xa_for_each(&br_offloads->ports, i, port) {
		if (port == failed)
			break;
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
	return err;
}

static void mlx5_esw_bridge_mcast_cleanup(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port;
	unsigned long i;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
}

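/* The IGMP/MLD redirect rules live in the ingress table shared by all bridges
 * on the eswitch, so they are created once by the first bridge that enables
 * multicast and torn down only when no bridge has it enabled anymore.
 */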
static int mlx5_esw_bridge_mcast_global_enable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	int err;

	if (br_offloads->ingress_igmp_fg)
		return 0; /* already enabled by another bridge */

	err = mlx5_esw_bridge_ingress_mcast_fgs_init(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flow groups (err=%d)\n",
			 err);
		return err;
	}

	err = mlx5_esw_bridge_ingress_mcast_fhs_create(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flows (err=%d)\n",
			 err);
		goto err_fhs;
	}

	return 0;

err_fhs:
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
	return err;
}

static void mlx5_esw_bridge_mcast_global_disable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *br;

	list_for_each_entry(br, &br_offloads->bridges, list) {
		/* Ingress table is global, so only disable snooping when all
		 * bridges on esw have multicast disabled.
		 */
		if (br->flags & MLX5_ESW_BRIDGE_MCAST_FLAG)
			return;
	}

	mlx5_esw_bridge_ingress_mcast_fhs_cleanup(br_offloads);
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
}

int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge)
{
	int err;

	err = mlx5_esw_bridge_mcast_global_enable(bridge->br_offloads);
	if (err)
		return err;

	bridge->flags |= MLX5_ESW_BRIDGE_MCAST_FLAG;

	err = mlx5_esw_bridge_mcast_init(bridge);
	if (err) {
		esw_warn(bridge->br_offloads->esw->dev, "Failed to enable multicast (err=%d)\n",
			 err);
		bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
		mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
	}
	return err;
}

void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_mcast_cleanup(bridge);
	bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
	mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
}