// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "lib/devcom.h"
#include "bridge.h"
#include "eswitch.h"
#include "bridge_priv.h"
#include "diag/bridge_tracepoint.h"

static const struct rhashtable_params mdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_mdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, ht_node),
	.automatic_shrinking = true,
};

int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->mdb_list);
	return rhashtable_init(&bridge->mdb_ht, &mdb_ht_params);
}

void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge)
{
	rhashtable_destroy(&bridge->mdb_ht);
}

static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_mdb_port_lookup(struct mlx5_esw_bridge_port *port,
				struct mlx5_esw_bridge_mdb_entry *entry)
{
	return xa_load(&entry->ports, mlx5_esw_bridge_port_key(port));
}

static int mlx5_esw_bridge_mdb_port_insert(struct mlx5_esw_bridge_port *port,
					   struct mlx5_esw_bridge_mdb_entry *entry)
{
	int err = xa_insert(&entry->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);

	if (!err)
		entry->num_ports++;
	return err;
}

static void mlx5_esw_bridge_mdb_port_remove(struct mlx5_esw_bridge_port *port,
					    struct mlx5_esw_bridge_mdb_entry *entry)
{
	xa_erase(&entry->ports, mlx5_esw_bridge_port_key(port));
	entry->num_ports--;
}

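/* Build a single egress rule for an MDB entry: match the multicast DMAC (and
 * VLAN, if the entry is VLAN-scoped) and replicate matching packets to the
 * per-port multicast table of every attached port. FLOW_ACT_IGNORE_FLOW_LEVEL
 * allows forwarding to those tables regardless of flow table level ordering.
 */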
static struct mlx5_flow_handle *
mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_mdb_entry *entry,
				struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL,
	};
	int num_dests = entry->num_ports, i = 0;
	struct mlx5_flow_destination *dests;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;
	unsigned long idx;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	dests = kvcalloc(num_dests, sizeof(*dests), GFP_KERNEL);
	if (!dests) {
		kvfree(rule_spec);
		return ERR_PTR(-ENOMEM);
	}

	xa_for_each(&entry->ports, idx, port) {
		dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dests[i].ft = port->mcast.ft;
		i++;
	}

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, entry->key.addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (entry->key.vid) {
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 entry->key.vid);
	}

	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, dests, num_dests);

	kvfree(dests);
	kvfree(rule_spec);
	return handle;
}

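/* (Re)create the egress replication rule so it reflects the current set of
 * ports attached to the MDB entry, replacing any previously offloaded rule.
 */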
static int
mlx5_esw_bridge_port_mdb_offload(struct mlx5_esw_bridge_port *port,
				 struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_flow_handle *handle;

	handle = mlx5_esw_bridge_mdb_flow_create(port->esw_owner_vhca_id, entry, port->bridge);
	if (entry->egress_handle) {
		mlx5_del_flow_rules(entry->egress_handle);
		entry->egress_handle = NULL;
	}
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	entry->egress_handle = handle;
	return 0;
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_mdb_lookup(struct mlx5_esw_bridge *bridge,
			   const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge_mdb_key key = {};

	ether_addr_copy(key.addr, addr);
	key.vid = vid;
	return rhashtable_lookup_fast(&bridge->mdb_ht, &key, mdb_ht_params);
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_port_mdb_entry_init(struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	xa_init(&entry->ports);
	err = rhashtable_insert_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	if (err)
		goto err_ht_insert;

	list_add(&entry->list, &bridge->mdb_list);

	return entry;

err_ht_insert:
	xa_destroy(&entry->ports);
	kvfree(entry);
	return ERR_PTR(err);
}

static void mlx5_esw_bridge_port_mdb_entry_cleanup(struct mlx5_esw_bridge *bridge,
						   struct mlx5_esw_bridge_mdb_entry *entry)
{
	if (entry->egress_handle)
		mlx5_del_flow_rules(entry->egress_handle);
	list_del(&entry->list);
	rhashtable_remove_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	xa_destroy(&entry->ports);
	kvfree(entry);
}

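/* Attach a port to the MDB group identified by (addr, vid). The entry is
 * created on first attach and the egress replication rule is refreshed to
 * include the new port.
 */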
int mlx5_esw_bridge_port_mdb_attach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return -EOPNOTSUPP;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (entry) {
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid=%u,vport=%u)\n",
				 addr, vid, port->vport_num);
			return 0;
		}
	} else {
		entry = mlx5_esw_bridge_port_mdb_entry_init(port, addr, vid);
		if (IS_ERR(entry)) {
			err = PTR_ERR(entry);
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
				 addr, vid, port->vport_num, err);
			return err;
		}
	}

	err = mlx5_esw_bridge_mdb_port_insert(port, entry);
	if (err) {
		if (!entry->num_ports)
			mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry); /* new mdb entry */
		esw_warn(bridge->br_offloads->esw->dev,
			 "MDB attach failed to insert port (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);
		return err;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);

	trace_mlx5_esw_bridge_port_mdb_attach(dev, entry);
	return 0;
}

static void mlx5_esw_bridge_port_mdb_entry_detach(struct mlx5_esw_bridge_port *port,
						  struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	mlx5_esw_bridge_mdb_port_remove(port, entry);
	if (!entry->num_ports) {
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
		return;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 entry->key.addr, entry->key.vid, port->vport_num, err);
}

void mlx5_esw_bridge_port_mdb_detach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				     const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (!entry) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not found (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	if (!mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not attached to the port (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	trace_mlx5_esw_bridge_port_mdb_detach(dev, entry);
	mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (entry->key.vid == vlan->vid && mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

static void mlx5_esw_bridge_port_mdb_flush(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
}
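
/* Each port of a multicast-enabled bridge gets a dedicated flow table that
 * packets replicated by MDB rules pass through before being forwarded to the
 * port's vport.
 */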
static int mlx5_esw_bridge_port_mcast_fts_init(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft;

	mcast_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE,
						MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
						esw);
	if (IS_ERR(mcast_ft))
		return PTR_ERR(mcast_ft);

	port->mcast.ft = mcast_ft;
	return 0;
}

static void mlx5_esw_bridge_port_mcast_fts_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.ft)
		mlx5_destroy_flow_table(port->mcast.ft);
	port->mcast.ft = NULL;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create filter flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					   struct mlx5_eswitch *esw,
					   struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge mcast table (err=%pe)\n",
			 vlan_proto, fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_qinq_fg_create(struct mlx5_eswitch *esw,
				     struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_fwd_fg_create(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create forward flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

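/* The per-port multicast table uses four flow groups: a source-port filter
 * group, VLAN and QinQ groups for tagged traffic, and a catch-all forward
 * group.
 */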
static int mlx5_esw_bridge_port_mcast_fgs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_group *fwd_fg, *qinq_fg, *vlan_fg, *filter_fg;
	struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft = port->mcast.ft;
	int err;

	filter_fg = mlx5_esw_bridge_mcast_filter_fg_create(esw, mcast_ft);
	if (IS_ERR(filter_fg))
		return PTR_ERR(filter_fg);

	vlan_fg = mlx5_esw_bridge_mcast_vlan_fg_create(esw, mcast_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_mcast_qinq_fg_create(esw, mcast_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	fwd_fg = mlx5_esw_bridge_mcast_fwd_fg_create(esw, mcast_ft);
	if (IS_ERR(fwd_fg)) {
		err = PTR_ERR(fwd_fg);
		goto err_fwd_fg;
	}

	port->mcast.filter_fg = filter_fg;
	port->mcast.vlan_fg = vlan_fg;
	port->mcast.qinq_fg = qinq_fg;
	port->mcast.fwd_fg = fwd_fg;

	return 0;

err_fwd_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_group(filter_fg);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fgs_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.fwd_fg)
		mlx5_destroy_flow_group(port->mcast.fwd_fg);
	port->mcast.fwd_fg = NULL;
	if (port->mcast.qinq_fg)
		mlx5_destroy_flow_group(port->mcast.qinq_fg);
	port->mcast.qinq_fg = NULL;
	if (port->mcast.vlan_fg)
		mlx5_destroy_flow_group(port->mcast.vlan_fg);
	port->mcast.vlan_fg = NULL;
	if (port->mcast.filter_fg)
		mlx5_destroy_flow_group(port->mcast.filter_fg);
	port->mcast.filter_fg = NULL;
}

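/* Source-port filter: drop packets whose reg_c_0 source port metadata matches
 * the port this table belongs to, so multicast traffic is never replicated
 * back to its source port.
 */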
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_flow_with_esw_create(struct mlx5_esw_bridge_port *port,
					   struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_DROP,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, port->vport_num));

	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, NULL, 0);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_mcast_flow_with_esw_create(port, port->bridge->br_offloads->esw);
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
	struct mlx5_flow_handle *handle;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return ERR_PTR(-ENODEV);

	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);

	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return handle;
}

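/* Per-VLAN rule in the port multicast table: match the VLAN tag, pop it with
 * the VLAN's packet reformat action and forward the packet to the vport.
 */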
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    port->vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	flow_act.pkt_reformat = vlan->pkt_reformat_pop;

	if (vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid, vlan->vid);

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				    struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_handle *handle;

	if (!(port->bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	handle = mlx5_esw_bridge_mcast_vlan_flow_create(vlan_proto, port, vlan);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	vlan->mcast_handle = handle;
	return 0;
}

void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	if (vlan->mcast_handle)
		mlx5_del_flow_rules(vlan->mcast_handle);
	vlan->mcast_handle = NULL;
}

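/* Unconditional rule in the forward group that steers remaining multicast
 * traffic to the port's vport.
 */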
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    port->vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static int mlx5_esw_bridge_port_mcast_fhs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_handle *filter_handle, *fwd_handle;
	struct mlx5_esw_bridge_vlan *vlan, *failed;
	unsigned long index;
	int err;

	filter_handle = (port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) ?
		mlx5_esw_bridge_mcast_filter_flow_peer_create(port) :
		mlx5_esw_bridge_mcast_filter_flow_create(port);
	if (IS_ERR(filter_handle))
		return PTR_ERR(filter_handle);

	fwd_handle = mlx5_esw_bridge_mcast_fwd_flow_create(port);
	if (IS_ERR(fwd_handle)) {
		err = PTR_ERR(fwd_handle);
		goto err_fwd;
	}

	xa_for_each(&port->vlans, index, vlan) {
		err = mlx5_esw_bridge_vlan_mcast_init(port->bridge->vlan_proto, port, vlan);
		if (err) {
			failed = vlan;
			goto err_vlan;
		}
	}

	port->mcast.filter_handle = filter_handle;
	port->mcast.fwd_handle = fwd_handle;

	return 0;

err_vlan:
	xa_for_each(&port->vlans, index, vlan) {
		if (vlan == failed)
			break;

		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
	}
	mlx5_del_flow_rules(fwd_handle);
err_fwd:
	mlx5_del_flow_rules(filter_handle);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fhs_cleanup(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);

	if (port->mcast.fwd_handle)
		mlx5_del_flow_rules(port->mcast.fwd_handle);
	port->mcast.fwd_handle = NULL;
	if (port->mcast.filter_handle)
		mlx5_del_flow_rules(port->mcast.filter_handle);
	port->mcast.filter_handle = NULL;
}

int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	err = mlx5_esw_bridge_port_mcast_fts_init(port, bridge);
	if (err)
		return err;

	err = mlx5_esw_bridge_port_mcast_fgs_init(port);
	if (err)
		goto err_fgs;

	err = mlx5_esw_bridge_port_mcast_fhs_init(port);
	if (err)
		goto err_fhs;
	return err;

err_fhs:
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
err_fgs:
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
	return err;
}

void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port)
{
	mlx5_esw_bridge_port_mdb_flush(port);
	mlx5_esw_bridge_port_mcast_fhs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
}

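/* IGMP and MLD control traffic is matched in the shared bridge ingress table,
 * so the groups and rules below are created once per eswitch rather than per
 * bridge or per port.
 */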
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_igmp_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_protocol);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create IGMP flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mld_fg_create(struct mlx5_eswitch *esw,
				      struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	if (!(MLX5_CAP_GEN(esw->dev, flex_parser_protocols) & MLX5_FLEX_PROTO_ICMPV6)) {
		esw_warn(esw->dev,
			 "Can't create MLD flow group due to missing hardware ICMPv6 parsing support\n");
		return NULL;
	}

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters_3.icmpv6_type);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MLD flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static int
mlx5_esw_bridge_ingress_mcast_fgs_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_group *igmp_fg, *mld_fg;

	igmp_fg = mlx5_esw_bridge_ingress_igmp_fg_create(esw, ingress_ft);
	if (IS_ERR(igmp_fg))
		return PTR_ERR(igmp_fg);

	mld_fg = mlx5_esw_bridge_ingress_mld_fg_create(esw, ingress_ft);
	if (IS_ERR(mld_fg)) {
		mlx5_destroy_flow_group(igmp_fg);
		return PTR_ERR(mld_fg);
	}

	br_offloads->ingress_igmp_fg = igmp_fg;
	br_offloads->ingress_mld_fg = mld_fg;
	return 0;
}

static void
mlx5_esw_bridge_ingress_mcast_fgs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->ingress_mld_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_mld_fg);
	br_offloads->ingress_mld_fg = NULL;
	if (br_offloads->ingress_igmp_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_igmp_fg);
	br_offloads->ingress_igmp_fg = NULL;
}

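/* The IGMP/MLD rules below steer multicast control packets to the skip table
 * so they bypass the offloaded bridge tables and remain visible to the
 * software bridge for snooping.
 */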
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_igmp_fh_create(struct mlx5_flow_table *ingress_ft,
				       struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 4);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_protocol, IPPROTO_IGMP);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_mld_fh_create(u8 type, struct mlx5_flow_table *ingress_ft,
				      struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 6);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, misc_parameters_3.icmpv6_type);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_3.icmpv6_type, type);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static int
mlx5_esw_bridge_ingress_mcast_fhs_create(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_handle *igmp_handle, *mld_query_handle, *mld_report_handle,
		*mld_done_handle;
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft,
		*skip_ft = br_offloads->skip_ft;
	int err;

	igmp_handle = mlx5_esw_bridge_ingress_igmp_fh_create(ingress_ft, skip_ft);
	if (IS_ERR(igmp_handle))
		return PTR_ERR(igmp_handle);

	if (br_offloads->ingress_mld_fg) {
		mld_query_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_QUERY,
									 ingress_ft,
									 skip_ft);
		if (IS_ERR(mld_query_handle)) {
			err = PTR_ERR(mld_query_handle);
			goto err_mld_query;
		}

		mld_report_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REPORT,
									  ingress_ft,
									  skip_ft);
		if (IS_ERR(mld_report_handle)) {
			err = PTR_ERR(mld_report_handle);
			goto err_mld_report;
		}

		mld_done_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REDUCTION,
									ingress_ft,
									skip_ft);
		if (IS_ERR(mld_done_handle)) {
			err = PTR_ERR(mld_done_handle);
			goto err_mld_done;
		}
	} else {
		mld_query_handle = NULL;
		mld_report_handle = NULL;
		mld_done_handle = NULL;
	}

	br_offloads->igmp_handle = igmp_handle;
	br_offloads->mld_query_handle = mld_query_handle;
	br_offloads->mld_report_handle = mld_report_handle;
	br_offloads->mld_done_handle = mld_done_handle;

	return 0;

err_mld_done:
	mlx5_del_flow_rules(mld_report_handle);
err_mld_report:
	mlx5_del_flow_rules(mld_query_handle);
err_mld_query:
	mlx5_del_flow_rules(igmp_handle);
	return err;
}

static void
mlx5_esw_bridge_ingress_mcast_fhs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->mld_done_handle)
		mlx5_del_flow_rules(br_offloads->mld_done_handle);
	br_offloads->mld_done_handle = NULL;
	if (br_offloads->mld_report_handle)
		mlx5_del_flow_rules(br_offloads->mld_report_handle);
	br_offloads->mld_report_handle = NULL;
	if (br_offloads->mld_query_handle)
		mlx5_del_flow_rules(br_offloads->mld_query_handle);
	br_offloads->mld_query_handle = NULL;
	if (br_offloads->igmp_handle)
		mlx5_del_flow_rules(br_offloads->igmp_handle);
	br_offloads->igmp_handle = NULL;
}

static int mlx5_esw_bridge_mcast_init(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port, *failed;
	unsigned long i;
	int err;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		err = mlx5_esw_bridge_port_mcast_init(port);
		if (err) {
			failed = port;
			goto err_port;
		}
	}
	return 0;

err_port:
	xa_for_each(&br_offloads->ports, i, port) {
		if (port == failed)
			break;
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
	return err;
}

static void mlx5_esw_bridge_mcast_cleanup(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port;
	unsigned long i;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
}

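/* The IGMP/MLD snooping groups and rules live in the shared ingress table:
 * they are created when the first bridge on the eswitch enables multicast and
 * torn down only after the last bridge disables it.
 */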
static int mlx5_esw_bridge_mcast_global_enable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	int err;

	if (br_offloads->ingress_igmp_fg)
		return 0; /* already enabled by another bridge */

	err = mlx5_esw_bridge_ingress_mcast_fgs_init(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flow groups (err=%d)\n",
			 err);
		return err;
	}

	err = mlx5_esw_bridge_ingress_mcast_fhs_create(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flows (err=%d)\n",
			 err);
		goto err_fhs;
	}

	return 0;

err_fhs:
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
	return err;
}

static void mlx5_esw_bridge_mcast_global_disable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *br;

	list_for_each_entry(br, &br_offloads->bridges, list) {
		/* Ingress table is global, so only disable snooping when all
		 * bridges on esw have multicast disabled.
		 */
		if (br->flags & MLX5_ESW_BRIDGE_MCAST_FLAG)
			return;
	}

	mlx5_esw_bridge_ingress_mcast_fhs_cleanup(br_offloads);
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
}

int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge)
{
	int err;

	err = mlx5_esw_bridge_mcast_global_enable(bridge->br_offloads);
	if (err)
		return err;

	bridge->flags |= MLX5_ESW_BRIDGE_MCAST_FLAG;

	err = mlx5_esw_bridge_mcast_init(bridge);
	if (err) {
		esw_warn(bridge->br_offloads->esw->dev, "Failed to enable multicast (err=%d)\n",
			 err);
		bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
		mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
	}
	return err;
}

void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_mcast_cleanup(bridge);
	bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
	mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
}