// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@nvidia.com>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"

static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
					   union net_bridge_eht_addr *src_addr,
					   union net_bridge_eht_addr *h_addr);
static void br_multicast_create_eht_set_entry(struct net_bridge_port_group *pg,
					      union net_bridge_eht_addr *src_addr,
					      union net_bridge_eht_addr *h_addr,
					      int filter_mode,
					      bool allow_zero_src);

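/* Look up the explicit host tracking entry for @h_addr in the port
 * group's host rbtree; the tree is ordered by memcmp() of the whole
 * address union. Returns NULL if the host isn't tracked.
 */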
static struct net_bridge_group_eht_host *
br_multicast_eht_host_lookup(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr)
{
	struct rb_node *node = pg->eht_host_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_host *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_host,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

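/* Return the tracked filter mode of @h_addr; hosts we have never seen
 * report as MCAST_INCLUDE (i.e. include with an empty source list).
 */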
static int br_multicast_eht_host_filter_mode(struct net_bridge_port_group *pg,
					     union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_host *eht_host;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (!eht_host)
		return MCAST_INCLUDE;

	return eht_host->filter_mode;
}

static struct net_bridge_group_eht_set_entry *
br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set *eht_set,
				  union net_bridge_eht_addr *h_addr)
{
	struct rb_node *node = eht_set->entry_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_set_entry *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_set_entry,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static struct net_bridge_group_eht_set *
br_multicast_eht_set_lookup(struct net_bridge_port_group *pg,
			    union net_bridge_eht_addr *src_addr)
{
	struct rb_node *node = pg->eht_set_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_set *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_set,
				rb_node);
		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

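/* Unlink and free a host entry once its last set entry is gone and
 * drop the per-port tracked-hosts counter.
 */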
static void __eht_destroy_host(struct net_bridge_group_eht_host *eht_host)
{
	WARN_ON(!hlist_empty(&eht_host->set_entries));

	br_multicast_eht_hosts_dec(eht_host->pg);

	rb_erase(&eht_host->rb_node, &eht_host->pg->eht_host_tree);
	RB_CLEAR_NODE(&eht_host->rb_node);
	kfree(eht_host);
}

static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_eht_set_entry *set_h;

	set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
	WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));

	del_timer_sync(&set_h->timer);
	kfree(set_h);
}

static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_eht_set *eht_set;

	eht_set = container_of(gc, struct net_bridge_group_eht_set, mcast_gc);
	WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
	WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));

	del_timer_sync(&eht_set->timer);
	kfree(eht_set);
}

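/* Unlink a set entry from its source set's rbtree and its host's list,
 * queue it for deferred freeing via the bridge's multicast GC work and
 * destroy the host if this was its last entry.
 */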
static void __eht_del_set_entry(struct net_bridge_group_eht_set_entry *set_h)
{
	struct net_bridge_group_eht_host *eht_host = set_h->h_parent;
	union net_bridge_eht_addr zero_addr;

	rb_erase(&set_h->rb_node, &set_h->eht_set->entry_tree);
	RB_CLEAR_NODE(&set_h->rb_node);
	hlist_del_init(&set_h->host_list);
	memset(&zero_addr, 0, sizeof(zero_addr));
	if (memcmp(&set_h->h_addr, &zero_addr, sizeof(zero_addr)))
		eht_host->num_entries--;
	hlist_add_head(&set_h->mcast_gc.gc_node, &set_h->br->mcast_gc_list);
	queue_work(system_long_wq, &set_h->br->mcast_gc_work);

	if (hlist_empty(&eht_host->set_entries))
		__eht_destroy_host(eht_host);
}

static void br_multicast_del_eht_set(struct net_bridge_group_eht_set *eht_set)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct rb_node *node;

	while ((node = rb_first(&eht_set->entry_tree))) {
		set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
				 rb_node);
		__eht_del_set_entry(set_h);
	}

	rb_erase(&eht_set->rb_node, &eht_set->pg->eht_set_tree);
	RB_CLEAR_NODE(&eht_set->rb_node);
	hlist_add_head(&eht_set->mcast_gc.gc_node, &eht_set->br->mcast_gc_list);
	queue_work(system_long_wq, &eht_set->br->mcast_gc_work);
}

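/* Flush all of a port group's EHT source sets and their entries. */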
void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_eht_set *eht_set;
	struct rb_node *node;

	while ((node = rb_first(&pg->eht_set_tree))) {
		eht_set = rb_entry(node, struct net_bridge_group_eht_set,
				   rb_node);
		br_multicast_del_eht_set(eht_set);
	}
}

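/* Timer expiry handlers for set entries and sets: under the multicast
 * lock, skip objects that were already unlinked or had their timer
 * re-armed, otherwise delete the timed-out entry/set.
 */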
static void br_multicast_eht_set_entry_expired(struct timer_list *t)
{
	struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, timer);
	struct net_bridge *br = set_h->br;

	spin_lock(&br->multicast_lock);
	if (RB_EMPTY_NODE(&set_h->rb_node) || timer_pending(&set_h->timer))
		goto out;

	br_multicast_del_eht_set_entry(set_h->eht_set->pg,
				       &set_h->eht_set->src_addr,
				       &set_h->h_addr);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_eht_set_expired(struct timer_list *t)
{
	struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t,
							      timer);
	struct net_bridge *br = eht_set->br;

	spin_lock(&br->multicast_lock);
	if (RB_EMPTY_NODE(&eht_set->rb_node) || timer_pending(&eht_set->timer))
		goto out;

	br_multicast_del_eht_set(eht_set);
out:
	spin_unlock(&br->multicast_lock);
}

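/* Find the host entry for @h_addr or create it, enforcing the per-port
 * EHT hosts limit. Newly created hosts start in @filter_mode and are
 * accounted via br_multicast_eht_hosts_inc().
 */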
static struct net_bridge_group_eht_host *
__eht_lookup_create_host(struct net_bridge_port_group *pg,
			 union net_bridge_eht_addr *h_addr,
			 unsigned char filter_mode)
{
	struct rb_node **link = &pg->eht_host_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_host *eht_host;

	while (*link) {
		struct net_bridge_group_eht_host *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_host,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	if (br_multicast_eht_hosts_over_limit(pg))
		return NULL;

	eht_host = kzalloc(sizeof(*eht_host), GFP_ATOMIC);
	if (!eht_host)
		return NULL;

	memcpy(&eht_host->h_addr, h_addr, sizeof(*h_addr));
	INIT_HLIST_HEAD(&eht_host->set_entries);
	eht_host->pg = pg;
	eht_host->filter_mode = filter_mode;

	rb_link_node(&eht_host->rb_node, parent, link);
	rb_insert_color(&eht_host->rb_node, &pg->eht_host_tree);

	br_multicast_eht_hosts_inc(pg);

	return eht_host;
}

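/* Find or create @eht_host's entry inside @eht_set. Creation is
 * bounded by PG_SRC_ENT_LIMIT entries per host, except for the
 * auto-created zero-source entry which is always allowed and never
 * counted in num_entries.
 */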
static struct net_bridge_group_eht_set_entry *
__eht_lookup_create_set_entry(struct net_bridge *br,
			      struct net_bridge_group_eht_set *eht_set,
			      struct net_bridge_group_eht_host *eht_host,
			      bool allow_zero_src)
{
	struct rb_node **link = &eht_set->entry_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_set_entry *set_h;

	while (*link) {
		struct net_bridge_group_eht_set_entry *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_set_entry,
				rb_node);
		result = memcmp(&eht_host->h_addr, &this->h_addr,
				sizeof(union net_bridge_eht_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	/* always allow auto-created zero entry */
	if (!allow_zero_src && eht_host->num_entries >= PG_SRC_ENT_LIMIT)
		return NULL;

	set_h = kzalloc(sizeof(*set_h), GFP_ATOMIC);
	if (!set_h)
		return NULL;

	memcpy(&set_h->h_addr, &eht_host->h_addr,
	       sizeof(union net_bridge_eht_addr));
	set_h->mcast_gc.destroy = br_multicast_destroy_eht_set_entry;
	set_h->eht_set = eht_set;
	set_h->h_parent = eht_host;
	set_h->br = br;
	timer_setup(&set_h->timer, br_multicast_eht_set_entry_expired, 0);

	hlist_add_head(&set_h->host_list, &eht_host->set_entries);
	rb_link_node(&set_h->rb_node, parent, link);
	rb_insert_color(&set_h->rb_node, &eht_set->entry_tree);
	/* we must not count the auto-created zero entry otherwise we won't be
	 * able to track the full list of PG_SRC_ENT_LIMIT entries
	 */
	if (!allow_zero_src)
		eht_host->num_entries++;

	return set_h;
}

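/* Find the source set for @src_addr in the port group or create it;
 * each set carries its own rbtree of per-host entries and its own
 * expiry timer.
 */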
static struct net_bridge_group_eht_set *
__eht_lookup_create_set(struct net_bridge_port_group *pg,
			union net_bridge_eht_addr *src_addr)
{
	struct rb_node **link = &pg->eht_set_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_set *eht_set;

	while (*link) {
		struct net_bridge_group_eht_set *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_set,
				rb_node);
		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	eht_set = kzalloc(sizeof(*eht_set), GFP_ATOMIC);
	if (!eht_set)
		return NULL;

	memcpy(&eht_set->src_addr, src_addr, sizeof(*src_addr));
	eht_set->mcast_gc.destroy = br_multicast_destroy_eht_set;
	eht_set->pg = pg;
	eht_set->br = pg->key.port->br;
	eht_set->entry_tree = RB_ROOT;
	timer_setup(&eht_set->timer, br_multicast_eht_set_expired, 0);

	rb_link_node(&eht_set->rb_node, parent, link);
	rb_insert_color(&eht_set->rb_node, &pg->eht_set_tree);

	return eht_set;
}

static void br_multicast_ip_src_to_eht_addr(const struct br_ip *src,
					    union net_bridge_eht_addr *dest)
{
	switch (src->proto) {
	case htons(ETH_P_IP):
		dest->ip4 = src->src.ip4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		memcpy(&dest->ip6, &src->src.ip6, sizeof(struct in6_addr));
		break;
#endif
	}
}

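/* Switch a tracked host to a new filter mode. EXCLUDE mode is modelled
 * with a catch-all zero-source set entry, which is created here when
 * converting to EXCLUDE and removed when converting back to INCLUDE.
 */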
static void br_eht_convert_host_filter_mode(struct net_bridge_port_group *pg,
					    union net_bridge_eht_addr *h_addr,
					    int filter_mode)
{
	struct net_bridge_group_eht_host *eht_host;
	union net_bridge_eht_addr zero_addr;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (eht_host)
		eht_host->filter_mode = filter_mode;

	memset(&zero_addr, 0, sizeof(zero_addr));
	switch (filter_mode) {
	case MCAST_INCLUDE:
		br_multicast_del_eht_set_entry(pg, &zero_addr, h_addr);
		break;
	case MCAST_EXCLUDE:
		br_multicast_create_eht_set_entry(pg, &zero_addr, h_addr,
						  MCAST_EXCLUDE,
						  true);
		break;
	}
}

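/* Create (or refresh) the set entry for (@src_addr, @h_addr): look up
 * or allocate the source set, the host and the entry itself, then arm
 * both the entry and the set timers with the group membership
 * interval. Partially created set/host objects are rolled back on
 * failure.
 */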
static void br_multicast_create_eht_set_entry(struct net_bridge_port_group *pg,
					      union net_bridge_eht_addr *src_addr,
					      union net_bridge_eht_addr *h_addr,
					      int filter_mode,
					      bool allow_zero_src)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_host *eht_host;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_eht_set *eht_set;
	union net_bridge_eht_addr zero_addr;

	memset(&zero_addr, 0, sizeof(zero_addr));
	if (!allow_zero_src && !memcmp(src_addr, &zero_addr, sizeof(zero_addr)))
		return;

	eht_set = __eht_lookup_create_set(pg, src_addr);
	if (!eht_set)
		return;

	eht_host = __eht_lookup_create_host(pg, h_addr, filter_mode);
	if (!eht_host)
		goto fail_host;

	set_h = __eht_lookup_create_set_entry(br, eht_set, eht_host,
					      allow_zero_src);
	if (!set_h)
		goto fail_set_entry;

	mod_timer(&set_h->timer, jiffies + br_multicast_gmi(br));
	mod_timer(&eht_set->timer, jiffies + br_multicast_gmi(br));

	return;

fail_set_entry:
	if (hlist_empty(&eht_host->set_entries))
		__eht_destroy_host(eht_host);
fail_host:
	if (RB_EMPTY_ROOT(&eht_set->entry_tree))
		br_multicast_del_eht_set(eht_set);
}

static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
					   union net_bridge_eht_addr *src_addr,
					   union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_set *eht_set;
	bool set_deleted = false;

	eht_set = br_multicast_eht_set_lookup(pg, src_addr);
	if (!eht_set)
		goto out;

	set_h = br_multicast_eht_set_entry_lookup(eht_set, h_addr);
	if (!set_h)
		goto out;

	__eht_del_set_entry(set_h);

	if (RB_EMPTY_ROOT(&eht_set->entry_tree)) {
		br_multicast_del_eht_set(eht_set);
		set_deleted = true;
	}

out:
	return set_deleted;
}

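/* Delete a tracked host together with all of its set entries; source
 * sets left empty are removed by the per-entry deletion.
 */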
static void br_multicast_del_eht_host(struct net_bridge_port_group *pg,
				      union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_host *eht_host;
	struct hlist_node *tmp;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (!eht_host)
		return;

	hlist_for_each_entry_safe(set_h, tmp, &eht_host->set_entries, host_list)
		br_multicast_del_eht_set_entry(set_h->eht_set->pg,
					       &set_h->eht_set->src_addr,
					       &set_h->h_addr);
}

/* create new set entries from reports */
static void __eht_create_set_entries(struct net_bridge_port_group *pg,
				     union net_bridge_eht_addr *h_addr,
				     void *srcs,
				     u32 nsrcs,
				     size_t addr_size,
				     int filter_mode)
{
	union net_bridge_eht_addr eht_src_addr;
	u32 src_idx;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		br_multicast_create_eht_set_entry(pg, &eht_src_addr, h_addr,
						  filter_mode,
						  false);
	}
}

/* delete existing set entries and their (S,G) entries if they were the last */
static bool __eht_del_set_entries(struct net_bridge_port_group *pg,
				  union net_bridge_eht_addr *h_addr,
				  void *srcs,
				  u32 nsrcs,
				  size_t addr_size)
{
	union net_bridge_eht_addr eht_src_addr;
	struct net_bridge_group_src *src_ent;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr, h_addr))
			continue;
		memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
		src_ent = br_multicast_find_group_src(pg, &src_ip);
		if (!src_ent)
			continue;
		br_multicast_del_group_src(src_ent, true);
		changed = true;
	}

	return changed;
}

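/* ALLOW_NEW_SOURCES: for an INCLUDE host the listed sources are added
 * to its tracked sets; for an EXCLUDE host the matching set entries
 * are removed and, if they were the last, their (S,G) entries as well.
 */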
static bool br_multicast_eht_allow(struct net_bridge_port_group *pg,
				   union net_bridge_eht_addr *h_addr,
				   void *srcs,
				   u32 nsrcs,
				   size_t addr_size)
{
	bool changed = false;

	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
	case MCAST_INCLUDE:
		__eht_create_set_entries(pg, h_addr, srcs, nsrcs, addr_size,
					 MCAST_INCLUDE);
		break;
	case MCAST_EXCLUDE:
		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
						addr_size);
		break;
	}

	return changed;
}

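/* BLOCK_OLD_SOURCES: the inverse of ALLOW, an INCLUDE host drops the
 * listed sources while an EXCLUDE host starts tracking them.
 */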
static bool br_multicast_eht_block(struct net_bridge_port_group *pg,
				   union net_bridge_eht_addr *h_addr,
				   void *srcs,
				   u32 nsrcs,
				   size_t addr_size)
{
	bool changed = false;

	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
	case MCAST_INCLUDE:
		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
						addr_size);
		break;
	case MCAST_EXCLUDE:
		__eht_create_set_entries(pg, h_addr, srcs, nsrcs, addr_size,
					 MCAST_EXCLUDE);
		break;
	}

	return changed;
}

/* flush_entries is true when changing mode */
static bool __eht_inc_exc(struct net_bridge_port_group *pg,
			  union net_bridge_eht_addr *h_addr,
			  void *srcs,
			  u32 nsrcs,
			  size_t addr_size,
			  unsigned char filter_mode,
			  bool to_report)
{
	bool changed = false, flush_entries = to_report;
	union net_bridge_eht_addr eht_src_addr;

	if (br_multicast_eht_host_filter_mode(pg, h_addr) != filter_mode)
		flush_entries = true;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	/* if we're changing mode del host and its entries */
	if (flush_entries)
		br_multicast_del_eht_host(pg, h_addr);
	__eht_create_set_entries(pg, h_addr, srcs, nsrcs, addr_size,
				 filter_mode);
	/* we can be missing sets only if we've deleted some entries */
	if (flush_entries) {
		struct net_bridge *br = pg->key.port->br;
		struct net_bridge_group_eht_set *eht_set;
		struct net_bridge_group_src *src_ent;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
			br_multicast_ip_src_to_eht_addr(&src_ent->addr,
							&eht_src_addr);
			if (!br_multicast_eht_set_lookup(pg, &eht_src_addr)) {
				br_multicast_del_group_src(src_ent, true);
				changed = true;
				continue;
			}
			/* this is an optimization for TO_INCLUDE where we lower
			 * the set's timeout to LMQT to catch timeout hosts:
			 * - host A (timing out): set entries X, Y
			 * - host B: set entry Z (new from current TO_INCLUDE)
			 *           sends BLOCK Z after LMQT but host A's EHT
			 *           entries still exist (unless lowered to LMQT
			 *           so they can timeout with the S,Gs)
			 * => we wait another LMQT, when we can just delete the
			 *    group immediately
			 */
			if (!(src_ent->flags & BR_SGRP_F_SEND) ||
			    filter_mode != MCAST_INCLUDE ||
			    !to_report)
				continue;
			eht_set = br_multicast_eht_set_lookup(pg,
							      &eht_src_addr);
			if (!eht_set)
				continue;
			mod_timer(&eht_set->timer, jiffies + br_multicast_lmqt(br));
		}
	}

	return changed;
}

static bool br_multicast_eht_inc(struct net_bridge_port_group *pg,
				 union net_bridge_eht_addr *h_addr,
				 void *srcs,
				 u32 nsrcs,
				 size_t addr_size,
				 bool to_report)
{
	bool changed;

	changed = __eht_inc_exc(pg, h_addr, srcs, nsrcs, addr_size,
				MCAST_INCLUDE, to_report);
	br_eht_convert_host_filter_mode(pg, h_addr, MCAST_INCLUDE);

	return changed;
}

static bool br_multicast_eht_exc(struct net_bridge_port_group *pg,
				 union net_bridge_eht_addr *h_addr,
				 void *srcs,
				 u32 nsrcs,
				 size_t addr_size,
				 bool to_report)
{
	bool changed;

	changed = __eht_inc_exc(pg, h_addr, srcs, nsrcs, addr_size,
				MCAST_EXCLUDE, to_report);
	br_eht_convert_host_filter_mode(pg, h_addr, MCAST_EXCLUDE);

	return changed;
}

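/* Translate an IGMPv3 group record into EHT operations; for the
 * TO_INCLUDE/TO_EXCLUDE change records the host's previous state is
 * flushed and rebuilt from the report (to_report == true).
 */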
static bool __eht_ip4_handle(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     int grec_type)
{
	bool changed = false, to_report = false;

	switch (grec_type) {
	case IGMPV3_ALLOW_NEW_SOURCES:
		br_multicast_eht_allow(pg, h_addr, srcs, nsrcs, sizeof(__be32));
		break;
	case IGMPV3_BLOCK_OLD_SOURCES:
		changed = br_multicast_eht_block(pg, h_addr, srcs, nsrcs,
						 sizeof(__be32));
		break;
	case IGMPV3_CHANGE_TO_INCLUDE:
		to_report = true;
		fallthrough;
	case IGMPV3_MODE_IS_INCLUDE:
		changed = br_multicast_eht_inc(pg, h_addr, srcs, nsrcs,
					       sizeof(__be32), to_report);
		break;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		to_report = true;
		fallthrough;
	case IGMPV3_MODE_IS_EXCLUDE:
		changed = br_multicast_eht_exc(pg, h_addr, srcs, nsrcs,
					       sizeof(__be32), to_report);
		break;
	}

	return changed;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool __eht_ip6_handle(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     int grec_type)
{
	bool changed = false, to_report = false;

	switch (grec_type) {
	case MLD2_ALLOW_NEW_SOURCES:
		br_multicast_eht_allow(pg, h_addr, srcs, nsrcs,
				       sizeof(struct in6_addr));
		break;
	case MLD2_BLOCK_OLD_SOURCES:
		changed = br_multicast_eht_block(pg, h_addr, srcs, nsrcs,
						 sizeof(struct in6_addr));
		break;
	case MLD2_CHANGE_TO_INCLUDE:
		to_report = true;
		fallthrough;
	case MLD2_MODE_IS_INCLUDE:
		changed = br_multicast_eht_inc(pg, h_addr, srcs, nsrcs,
					       sizeof(struct in6_addr),
					       to_report);
		break;
	case MLD2_CHANGE_TO_EXCLUDE:
		to_report = true;
		fallthrough;
	case MLD2_MODE_IS_EXCLUDE:
		changed = br_multicast_eht_exc(pg, h_addr, srcs, nsrcs,
					       sizeof(struct in6_addr),
					       to_report);
		break;
	}

	return changed;
}
#endif

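/* Entry point from IGMPv3/MLDv2 report processing. Explicit host
 * tracking is done only when the receiving port has fast leave
 * enabled (BR_MULTICAST_FAST_LEAVE, e.g. toggled from user space with
 * "bridge link set dev <port> fastleave on"). @h_addr is the
 * reporting host's address, @addr_size selects IPv4 vs IPv6 handling.
 */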
/* true means an entry was deleted */
bool br_multicast_eht_handle(struct net_bridge_port_group *pg,
			     void *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     size_t addr_size,
			     int grec_type)
{
	bool eht_enabled = !!(pg->key.port->flags & BR_MULTICAST_FAST_LEAVE);
	union net_bridge_eht_addr eht_host_addr;
	bool changed = false;

	if (!eht_enabled)
		goto out;

	memset(&eht_host_addr, 0, sizeof(eht_host_addr));
	memcpy(&eht_host_addr, h_addr, addr_size);
	if (addr_size == sizeof(__be32))
		changed = __eht_ip4_handle(pg, &eht_host_addr, srcs, nsrcs,
					   grec_type);
#if IS_ENABLED(CONFIG_IPV6)
	else
		changed = __eht_ip6_handle(pg, &eht_host_addr, srcs, nsrcs,
					   grec_type);
#endif

out:
	return changed;
}

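/* Set the per-port cap on tracked EHT hosts; zero is rejected. This is
 * reached from the bridge port configuration path (the per-port
 * IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT netlink attribute, assuming the
 * matching uapi in this tree).
 */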
int br_multicast_eht_set_hosts_limit(struct net_bridge_port *p,
				     u32 eht_hosts_limit)
{
	struct net_bridge *br = p->br;

	if (!eht_hosts_limit)
		return -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	p->multicast_eht_hosts_limit = eht_hosts_limit;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}