// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@nvidia.com>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"

static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
					   union net_bridge_eht_addr *src_addr,
					   union net_bridge_eht_addr *h_addr);
static void br_multicast_create_eht_set_entry(struct net_bridge_port_group *pg,
					      union net_bridge_eht_addr *src_addr,
					      union net_bridge_eht_addr *h_addr,
					      int filter_mode,
					      bool allow_zero_src);

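/* Look up the explicitly tracked host @h_addr in the port group's host
 * rb-tree (ordered by memcmp() of the address); returns NULL if the host
 * is not tracked.
 */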
static struct net_bridge_group_eht_host *
br_multicast_eht_host_lookup(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr)
{
	struct rb_node *node = pg->eht_host_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_host *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_host,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

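/* Return the tracked host's current filter mode; hosts that are not tracked
 * yet are treated as MCAST_INCLUDE.
 */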
static int br_multicast_eht_host_filter_mode(struct net_bridge_port_group *pg,
					     union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_host *eht_host;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (!eht_host)
		return MCAST_INCLUDE;

	return eht_host->filter_mode;
}

static struct net_bridge_group_eht_set_entry *
br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set *eht_set,
				  union net_bridge_eht_addr *h_addr)
{
	struct rb_node *node = eht_set->entry_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_set_entry *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_set_entry,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

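/* Look up the per-source set for @src_addr in the port group's set rb-tree;
 * each set holds one entry per host that reported this source.
 */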
static struct net_bridge_group_eht_set *
br_multicast_eht_set_lookup(struct net_bridge_port_group *pg,
			    union net_bridge_eht_addr *src_addr)
{
	struct rb_node *node = pg->eht_set_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_set *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_set,
				rb_node);
		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

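/* Unlink a host with no remaining set entries from the host rb-tree,
 * decrement the port's tracked host count and free it.
 */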
static void __eht_destroy_host(struct net_bridge_group_eht_host *eht_host)
{
	WARN_ON(!hlist_empty(&eht_host->set_entries));

	br_multicast_eht_hosts_dec(eht_host->pg);

	rb_erase(&eht_host->rb_node, &eht_host->pg->eht_host_tree);
	RB_CLEAR_NODE(&eht_host->rb_node);
	kfree(eht_host);
}

static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_eht_set_entry *set_h;

	set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
	WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));

	del_timer_sync(&set_h->timer);
	kfree(set_h);
}

static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_eht_set *eht_set;

	eht_set = container_of(gc, struct net_bridge_group_eht_set, mcast_gc);
	WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
	WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));

	del_timer_sync(&eht_set->timer);
	kfree(eht_set);
}

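/* Unlink a set entry from its set and from its host, queue it for deferred
 * freeing via the bridge's multicast gc work, and destroy the host if this
 * was its last entry.
 */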
static void __eht_del_set_entry(struct net_bridge_group_eht_set_entry *set_h)
{
	struct net_bridge_group_eht_host *eht_host = set_h->h_parent;
	union net_bridge_eht_addr zero_addr;

	rb_erase(&set_h->rb_node, &set_h->eht_set->entry_tree);
	RB_CLEAR_NODE(&set_h->rb_node);
	hlist_del_init(&set_h->host_list);
	memset(&zero_addr, 0, sizeof(zero_addr));
	if (memcmp(&set_h->h_addr, &zero_addr, sizeof(zero_addr)))
		eht_host->num_entries--;
	hlist_add_head(&set_h->mcast_gc.gc_node, &set_h->br->mcast_gc_list);
	queue_work(system_long_wq, &set_h->br->mcast_gc_work);

	if (hlist_empty(&eht_host->set_entries))
		__eht_destroy_host(eht_host);
}

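/* Delete all of a source set's entries, then unlink the set itself and queue
 * it for deferred freeing.
 */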
static void br_multicast_del_eht_set(struct net_bridge_group_eht_set *eht_set)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct rb_node *node;

	while ((node = rb_first(&eht_set->entry_tree))) {
		set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
				 rb_node);
		__eht_del_set_entry(set_h);
	}

	rb_erase(&eht_set->rb_node, &eht_set->pg->eht_set_tree);
	RB_CLEAR_NODE(&eht_set->rb_node);
	hlist_add_head(&eht_set->mcast_gc.gc_node, &eht_set->br->mcast_gc_list);
	queue_work(system_long_wq, &eht_set->br->mcast_gc_work);
}

void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_eht_set *eht_set;
	struct rb_node *node;

	while ((node = rb_first(&pg->eht_set_tree))) {
		eht_set = rb_entry(node, struct net_bridge_group_eht_set,
				   rb_node);
		br_multicast_del_eht_set(eht_set);
	}
}

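/* Timer callbacks for set entries and sets: take the multicast lock and drop
 * the expired object unless it was already unlinked or re-armed in the
 * meantime.
 */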
static void br_multicast_eht_set_entry_expired(struct timer_list *t)
{
	struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, timer);
	struct net_bridge *br = set_h->br;

	spin_lock(&br->multicast_lock);
	if (RB_EMPTY_NODE(&set_h->rb_node) || timer_pending(&set_h->timer))
		goto out;

	br_multicast_del_eht_set_entry(set_h->eht_set->pg,
				       &set_h->eht_set->src_addr,
				       &set_h->h_addr);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_eht_set_expired(struct timer_list *t)
{
	struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t,
							      timer);
	struct net_bridge *br = eht_set->br;

	spin_lock(&br->multicast_lock);
	if (RB_EMPTY_NODE(&eht_set->rb_node) || timer_pending(&eht_set->timer))
		goto out;

	br_multicast_del_eht_set(eht_set);
out:
	spin_unlock(&br->multicast_lock);
}

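/* Find the tracked host @h_addr or create it with @filter_mode if it does
 * not exist yet, subject to the port's tracked hosts limit. Returns NULL on
 * allocation failure or when the limit has been reached.
 */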
static struct net_bridge_group_eht_host *
__eht_lookup_create_host(struct net_bridge_port_group *pg,
			 union net_bridge_eht_addr *h_addr,
			 unsigned char filter_mode)
{
	struct rb_node **link = &pg->eht_host_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_host *eht_host;

	while (*link) {
		struct net_bridge_group_eht_host *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_host,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	if (br_multicast_eht_hosts_over_limit(pg))
		return NULL;

	eht_host = kzalloc(sizeof(*eht_host), GFP_ATOMIC);
	if (!eht_host)
		return NULL;

	memcpy(&eht_host->h_addr, h_addr, sizeof(*h_addr));
	INIT_HLIST_HEAD(&eht_host->set_entries);
	eht_host->pg = pg;
	eht_host->filter_mode = filter_mode;

	rb_link_node(&eht_host->rb_node, parent, link);
	rb_insert_color(&eht_host->rb_node, &pg->eht_host_tree);

	br_multicast_eht_hosts_inc(pg);

	return eht_host;
}

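/* Find or create @eht_host's entry in @eht_set. New entries are refused once
 * the host has PG_SRC_ENT_LIMIT tracked sources, except for the auto-created
 * zero-source entry (@allow_zero_src), which is also not counted against the
 * limit.
 */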
static struct net_bridge_group_eht_set_entry *
__eht_lookup_create_set_entry(struct net_bridge *br,
			      struct net_bridge_group_eht_set *eht_set,
			      struct net_bridge_group_eht_host *eht_host,
			      bool allow_zero_src)
{
	struct rb_node **link = &eht_set->entry_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_set_entry *set_h;

	while (*link) {
		struct net_bridge_group_eht_set_entry *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_set_entry,
				rb_node);
		result = memcmp(&eht_host->h_addr, &this->h_addr,
				sizeof(union net_bridge_eht_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	/* always allow auto-created zero entry */
	if (!allow_zero_src && eht_host->num_entries >= PG_SRC_ENT_LIMIT)
		return NULL;

	set_h = kzalloc(sizeof(*set_h), GFP_ATOMIC);
	if (!set_h)
		return NULL;

	memcpy(&set_h->h_addr, &eht_host->h_addr,
	       sizeof(union net_bridge_eht_addr));
	set_h->mcast_gc.destroy = br_multicast_destroy_eht_set_entry;
	set_h->eht_set = eht_set;
	set_h->h_parent = eht_host;
	set_h->br = br;
	timer_setup(&set_h->timer, br_multicast_eht_set_entry_expired, 0);

	hlist_add_head(&set_h->host_list, &eht_host->set_entries);
	rb_link_node(&set_h->rb_node, parent, link);
	rb_insert_color(&set_h->rb_node, &eht_set->entry_tree);
	/* we must not count the auto-created zero entry otherwise we won't be
	 * able to track the full list of PG_SRC_ENT_LIMIT entries
	 */
	if (!allow_zero_src)
		eht_host->num_entries++;

	return set_h;
}

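/* Find or create the per-source set for @src_addr in the port group's set
 * rb-tree. Returns NULL on allocation failure.
 */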
static struct net_bridge_group_eht_set *
__eht_lookup_create_set(struct net_bridge_port_group *pg,
			union net_bridge_eht_addr *src_addr)
{
	struct rb_node **link = &pg->eht_set_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_set *eht_set;

	while (*link) {
		struct net_bridge_group_eht_set *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_set,
				rb_node);
		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	eht_set = kzalloc(sizeof(*eht_set), GFP_ATOMIC);
	if (!eht_set)
		return NULL;

	memcpy(&eht_set->src_addr, src_addr, sizeof(*src_addr));
	eht_set->mcast_gc.destroy = br_multicast_destroy_eht_set;
	eht_set->pg = pg;
	eht_set->br = pg->key.port->br;
	eht_set->entry_tree = RB_ROOT;
	timer_setup(&eht_set->timer, br_multicast_eht_set_expired, 0);

	rb_link_node(&eht_set->rb_node, parent, link);
	rb_insert_color(&eht_set->rb_node, &pg->eht_set_tree);

	return eht_set;
}

static void br_multicast_ip_src_to_eht_addr(const struct br_ip *src,
					    union net_bridge_eht_addr *dest)
{
	switch (src->proto) {
	case htons(ETH_P_IP):
		dest->ip4 = src->src.ip4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		memcpy(&dest->ip6, &src->src.ip6, sizeof(struct in6_addr));
		break;
#endif
	}
}

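/* Switch a tracked host to @filter_mode and keep the zero-source set in
 * sync: the host's auto-created zero-source entry exists only while the
 * host is in EXCLUDE mode.
 */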
static void br_eht_convert_host_filter_mode(struct net_bridge_port_group *pg,
					    union net_bridge_eht_addr *h_addr,
					    int filter_mode)
{
	struct net_bridge_group_eht_host *eht_host;
	union net_bridge_eht_addr zero_addr;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (eht_host)
		eht_host->filter_mode = filter_mode;

	memset(&zero_addr, 0, sizeof(zero_addr));
	switch (filter_mode) {
	case MCAST_INCLUDE:
		br_multicast_del_eht_set_entry(pg, &zero_addr, h_addr);
		break;
	case MCAST_EXCLUDE:
		br_multicast_create_eht_set_entry(pg, &zero_addr, h_addr,
						  MCAST_EXCLUDE,
						  true);
		break;
	}
}

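/* Record that host @h_addr reported source @src_addr: look up or create the
 * source set, the host and the host's entry in that set, then arm both the
 * entry and set timers with the group membership interval. Partially created
 * state is rolled back on failure.
 */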
static void br_multicast_create_eht_set_entry(struct net_bridge_port_group *pg,
					      union net_bridge_eht_addr *src_addr,
					      union net_bridge_eht_addr *h_addr,
					      int filter_mode,
					      bool allow_zero_src)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_host *eht_host;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_eht_set *eht_set;
	union net_bridge_eht_addr zero_addr;

	memset(&zero_addr, 0, sizeof(zero_addr));
	if (!allow_zero_src && !memcmp(src_addr, &zero_addr, sizeof(zero_addr)))
		return;

	eht_set = __eht_lookup_create_set(pg, src_addr);
	if (!eht_set)
		return;

	eht_host = __eht_lookup_create_host(pg, h_addr, filter_mode);
	if (!eht_host)
		goto fail_host;

	set_h = __eht_lookup_create_set_entry(br, eht_set, eht_host,
					      allow_zero_src);
	if (!set_h)
		goto fail_set_entry;

	mod_timer(&set_h->timer, jiffies + br_multicast_gmi(br));
	mod_timer(&eht_set->timer, jiffies + br_multicast_gmi(br));

	return;

fail_set_entry:
	if (hlist_empty(&eht_host->set_entries))
		__eht_destroy_host(eht_host);
fail_host:
	if (RB_EMPTY_ROOT(&eht_set->entry_tree))
		br_multicast_del_eht_set(eht_set);
}

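/* Remove host @h_addr's entry from the set for @src_addr; returns true if
 * that made the set empty and the whole set was deleted as well.
 */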
static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
					   union net_bridge_eht_addr *src_addr,
					   union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_set *eht_set;
	bool set_deleted = false;

	eht_set = br_multicast_eht_set_lookup(pg, src_addr);
	if (!eht_set)
		goto out;

	set_h = br_multicast_eht_set_entry_lookup(eht_set, h_addr);
	if (!set_h)
		goto out;

	__eht_del_set_entry(set_h);

	if (RB_EMPTY_ROOT(&eht_set->entry_tree)) {
		br_multicast_del_eht_set(eht_set);
		set_deleted = true;
	}

out:
	return set_deleted;
}

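/* Delete all set entries belonging to host @h_addr; the host itself is
 * destroyed by __eht_del_set_entry() once its last entry goes away.
 */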
static void br_multicast_del_eht_host(struct net_bridge_port_group *pg,
				      union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_host *eht_host;
	struct hlist_node *tmp;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (!eht_host)
		return;

	hlist_for_each_entry_safe(set_h, tmp, &eht_host->set_entries, host_list)
		br_multicast_del_eht_set_entry(set_h->eht_set->pg,
					       &set_h->eht_set->src_addr,
					       &set_h->h_addr);
}

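/* ALLOW_NEW_SOURCES from a host in INCLUDE mode: simply add (or refresh) a
 * set entry for each reported source.
 */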
static void __eht_allow_incl(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     size_t addr_size)
{
	union net_bridge_eht_addr eht_src_addr;
	u32 src_idx;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		br_multicast_create_eht_set_entry(pg, &eht_src_addr, h_addr,
						  MCAST_INCLUDE,
						  false);
	}
}

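/* ALLOW_NEW_SOURCES handling for the EXCLUDE case: depending on the host's
 * filter mode the sources are either added as include entries or removed
 * from the host's exclude set; removing a source's last set entry also
 * deletes the matching S,G group source. Returns true if a group source was
 * deleted.
 */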
static bool __eht_allow_excl(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     size_t addr_size)
{
	bool changed = false, host_excl = false;
	union net_bridge_eht_addr eht_src_addr;
	struct net_bridge_group_src *src_ent;
	struct br_ip src_ip;
	u32 src_idx;

	host_excl = !!(br_multicast_eht_host_filter_mode(pg, h_addr) == MCAST_EXCLUDE);
	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	/* initialize src_ip as the block handlers below do, otherwise the
	 * group source lookup would use an uninitialized protocol
	 */
	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		if (!host_excl) {
			br_multicast_create_eht_set_entry(pg, &eht_src_addr, h_addr,
							  MCAST_INCLUDE,
							  false);
		} else {
			if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr,
							    h_addr))
				continue;
			memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
			src_ent = br_multicast_find_group_src(pg, &src_ip);
			if (!src_ent)
				continue;
			br_multicast_del_group_src(src_ent, true);
			changed = true;
		}
	}

	return changed;
}

static bool br_multicast_eht_allow(struct net_bridge_port_group *pg,
				   union net_bridge_eht_addr *h_addr,
				   void *srcs,
				   u32 nsrcs,
				   size_t addr_size)
{
	bool changed = false;

	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
	case MCAST_INCLUDE:
		__eht_allow_incl(pg, h_addr, srcs, nsrcs, addr_size);
		break;
	case MCAST_EXCLUDE:
		changed = __eht_allow_excl(pg, h_addr, srcs, nsrcs, addr_size);
		break;
	}

	return changed;
}

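/* BLOCK_OLD_SOURCES when the host is in INCLUDE mode: remove the reported
 * sources from the host's include set and delete the matching S,G group
 * source when its set becomes empty. Returns true if a group source was
 * deleted.
 */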
static bool __eht_block_incl(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     size_t addr_size)
{
	union net_bridge_eht_addr eht_src_addr;
	struct net_bridge_group_src *src_ent;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr, h_addr))
			continue;
		memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
		src_ent = br_multicast_find_group_src(pg, &src_ip);
		if (!src_ent)
			continue;
		br_multicast_del_group_src(src_ent, true);
		changed = true;
	}

	return changed;
}

static bool __eht_block_excl(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     size_t addr_size)
{
	bool changed = false, host_excl = false;
	union net_bridge_eht_addr eht_src_addr;
	struct net_bridge_group_src *src_ent;
	struct br_ip src_ip;
	u32 src_idx;

	host_excl = !!(br_multicast_eht_host_filter_mode(pg, h_addr) == MCAST_EXCLUDE);
	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		if (host_excl) {
			br_multicast_create_eht_set_entry(pg, &eht_src_addr, h_addr,
							  MCAST_EXCLUDE,
							  false);
		} else {
			if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr,
							    h_addr))
				continue;
			memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
			src_ent = br_multicast_find_group_src(pg, &src_ip);
			if (!src_ent)
				continue;
			br_multicast_del_group_src(src_ent, true);
			changed = true;
		}
	}

	return changed;
}

static bool br_multicast_eht_block(struct net_bridge_port_group *pg,
				   union net_bridge_eht_addr *h_addr,
				   void *srcs,
				   u32 nsrcs,
				   size_t addr_size)
{
	bool changed = false;

	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
	case MCAST_INCLUDE:
		changed = __eht_block_incl(pg, h_addr, srcs, nsrcs, addr_size);
		break;
	case MCAST_EXCLUDE:
		changed = __eht_block_excl(pg, h_addr, srcs, nsrcs, addr_size);
		break;
	}

	return changed;
}

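/* Common handling for current-state and state-change INCLUDE/EXCLUDE records
 * from one host: create or refresh set entries for the reported sources and,
 * when the host's filter mode changes or this is a state-change report,
 * first flush the host's old entries and afterwards prune group sources that
 * no longer have any tracked set. Returns true if a group source was
 * deleted.
 */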
/* flush_entries is true when changing mode */
static bool __eht_inc_exc(struct net_bridge_port_group *pg,
			  union net_bridge_eht_addr *h_addr,
			  void *srcs,
			  u32 nsrcs,
			  size_t addr_size,
			  unsigned char filter_mode,
			  bool to_report)
{
	bool changed = false, flush_entries = to_report;
	union net_bridge_eht_addr eht_src_addr;
	u32 src_idx;

	if (br_multicast_eht_host_filter_mode(pg, h_addr) != filter_mode)
		flush_entries = true;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	/* if we're changing mode del host and its entries */
	if (flush_entries)
		br_multicast_del_eht_host(pg, h_addr);
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		br_multicast_create_eht_set_entry(pg, &eht_src_addr, h_addr,
						  filter_mode, false);
	}
	/* we can be missing sets only if we've deleted some entries */
	if (flush_entries) {
		struct net_bridge *br = pg->key.port->br;
		struct net_bridge_group_eht_set *eht_set;
		struct net_bridge_group_src *src_ent;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
			br_multicast_ip_src_to_eht_addr(&src_ent->addr,
							&eht_src_addr);
			if (!br_multicast_eht_set_lookup(pg, &eht_src_addr)) {
				br_multicast_del_group_src(src_ent, true);
				changed = true;
				continue;
			}
			/* this is an optimization for TO_INCLUDE where we lower
			 * the set's timeout to LMQT to catch timeout hosts:
			 * - host A (timing out): set entries X, Y
			 * - host B: set entry Z (new from current TO_INCLUDE)
			 *           sends BLOCK Z after LMQT but host A's EHT
			 *           entries still exist (unless lowered to LMQT
			 *           so they can timeout with the S,Gs)
			 * => we wait another LMQT, when we can just delete the
			 *    group immediately
			 */
			if (!(src_ent->flags & BR_SGRP_F_SEND) ||
			    filter_mode != MCAST_INCLUDE ||
			    !to_report)
				continue;
			eht_set = br_multicast_eht_set_lookup(pg,
							      &eht_src_addr);
			if (!eht_set)
				continue;
			mod_timer(&eht_set->timer, jiffies + br_multicast_lmqt(br));
		}
	}

	return changed;
}

static bool br_multicast_eht_inc(struct net_bridge_port_group *pg,
				 union net_bridge_eht_addr *h_addr,
				 void *srcs,
				 u32 nsrcs,
				 size_t addr_size,
				 bool to_report)
{
	bool changed;

	changed = __eht_inc_exc(pg, h_addr, srcs, nsrcs, addr_size,
				MCAST_INCLUDE, to_report);
	br_eht_convert_host_filter_mode(pg, h_addr, MCAST_INCLUDE);

	return changed;
}

static bool br_multicast_eht_exc(struct net_bridge_port_group *pg,
				 union net_bridge_eht_addr *h_addr,
				 void *srcs,
				 u32 nsrcs,
				 size_t addr_size,
				 bool to_report)
{
	bool changed;

	changed = __eht_inc_exc(pg, h_addr, srcs, nsrcs, addr_size,
				MCAST_EXCLUDE, to_report);
	br_eht_convert_host_filter_mode(pg, h_addr, MCAST_EXCLUDE);

	return changed;
}

static bool __eht_ip4_handle(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     int grec_type)
{
	bool changed = false, to_report = false;

	switch (grec_type) {
	case IGMPV3_ALLOW_NEW_SOURCES:
		br_multicast_eht_allow(pg, h_addr, srcs, nsrcs, sizeof(__be32));
		break;
	case IGMPV3_BLOCK_OLD_SOURCES:
		changed = br_multicast_eht_block(pg, h_addr, srcs, nsrcs,
						 sizeof(__be32));
		break;
	case IGMPV3_CHANGE_TO_INCLUDE:
		to_report = true;
		fallthrough;
	case IGMPV3_MODE_IS_INCLUDE:
		changed = br_multicast_eht_inc(pg, h_addr, srcs, nsrcs,
					       sizeof(__be32), to_report);
		break;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		to_report = true;
		fallthrough;
	case IGMPV3_MODE_IS_EXCLUDE:
		changed = br_multicast_eht_exc(pg, h_addr, srcs, nsrcs,
					       sizeof(__be32), to_report);
		break;
	}

	return changed;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool __eht_ip6_handle(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     int grec_type)
{
	bool changed = false, to_report = false;

	switch (grec_type) {
	case MLD2_ALLOW_NEW_SOURCES:
		br_multicast_eht_allow(pg, h_addr, srcs, nsrcs,
				       sizeof(struct in6_addr));
		break;
	case MLD2_BLOCK_OLD_SOURCES:
		changed = br_multicast_eht_block(pg, h_addr, srcs, nsrcs,
						 sizeof(struct in6_addr));
		break;
	case MLD2_CHANGE_TO_INCLUDE:
		to_report = true;
		fallthrough;
	case MLD2_MODE_IS_INCLUDE:
		changed = br_multicast_eht_inc(pg, h_addr, srcs, nsrcs,
					       sizeof(struct in6_addr),
					       to_report);
		break;
	case MLD2_CHANGE_TO_EXCLUDE:
		to_report = true;
		fallthrough;
	case MLD2_MODE_IS_EXCLUDE:
		changed = br_multicast_eht_exc(pg, h_addr, srcs, nsrcs,
					       sizeof(struct in6_addr),
					       to_report);
		break;
	}

	return changed;
}
#endif

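/* Entry point for explicit host tracking, called for each IGMPv3/MLDv2 group
 * record when fast leave is enabled on the port. @h_addr points to the
 * reporting host's address (4 or 16 bytes, as indicated by @addr_size) and
 * @srcs/@nsrcs describe the record's source list.
 */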
/* true means an entry was deleted */
bool br_multicast_eht_handle(struct net_bridge_port_group *pg,
			     void *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     size_t addr_size,
			     int grec_type)
{
	bool eht_enabled = !!(pg->key.port->flags & BR_MULTICAST_FAST_LEAVE);
	union net_bridge_eht_addr eht_host_addr;
	bool changed = false;

	if (!eht_enabled)
		goto out;

	memset(&eht_host_addr, 0, sizeof(eht_host_addr));
	memcpy(&eht_host_addr, h_addr, addr_size);
	if (addr_size == sizeof(__be32))
		changed = __eht_ip4_handle(pg, &eht_host_addr, srcs, nsrcs,
					   grec_type);
#if IS_ENABLED(CONFIG_IPV6)
	else
		changed = __eht_ip6_handle(pg, &eht_host_addr, srcs, nsrcs,
					   grec_type);
#endif

out:
	return changed;
}

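/* Set the per-port limit on the number of tracked EHT hosts; zero is not a
 * valid limit.
 */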
int br_multicast_eht_set_hosts_limit(struct net_bridge_port *p,
				     u32 eht_hosts_limit)
{
	struct net_bridge *br = p->br;

	if (!eht_hosts_limit)
		return -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	p->multicast_eht_hosts_limit = eht_hosts_limit;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
879