xref: /openbmc/linux/net/bridge/br_multicast.c (revision 0760aad038b5a032c31ea124feed63d88627d2f1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Bridge multicast support.
4  *
5  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7 
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34 
35 #include "br_private.h"
36 
/* rhashtable parameters for the bridge multicast database (mdb):
 * entries hang off net_bridge_mdb_entry::rhnode and are keyed by the
 * full struct br_ip (address union, protocol and vid).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
43 
44 static void br_multicast_start_querier(struct net_bridge *br,
45 				       struct bridge_mcast_own_query *query);
46 static void br_multicast_add_router(struct net_bridge *br,
47 				    struct net_bridge_port *port);
48 static void br_ip4_multicast_leave_group(struct net_bridge *br,
49 					 struct net_bridge_port *port,
50 					 __be32 group,
51 					 __u16 vid,
52 					 const unsigned char *src);
53 static void br_multicast_port_group_rexmit(struct timer_list *t);
54 
55 static void __del_port_router(struct net_bridge_port *p);
56 #if IS_ENABLED(CONFIG_IPV6)
57 static void br_ip6_multicast_leave_group(struct net_bridge *br,
58 					 struct net_bridge_port *port,
59 					 const struct in6_addr *group,
60 					 __u16 vid, const unsigned char *src);
61 #endif
62 
/* Look up an mdb entry by address; the caller provides the RCU read-side
 * protection required by rhashtable_lookup().
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
68 
69 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
70 					   struct br_ip *dst)
71 {
72 	struct net_bridge_mdb_entry *ent;
73 
74 	lockdep_assert_held_once(&br->multicast_lock);
75 
76 	rcu_read_lock();
77 	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
78 	rcu_read_unlock();
79 
80 	return ent;
81 }
82 
83 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
84 						   __be32 dst, __u16 vid)
85 {
86 	struct br_ip br_dst;
87 
88 	memset(&br_dst, 0, sizeof(br_dst));
89 	br_dst.u.ip4 = dst;
90 	br_dst.proto = htons(ETH_P_IP);
91 	br_dst.vid = vid;
92 
93 	return br_mdb_ip_get(br, &br_dst);
94 }
95 
96 #if IS_ENABLED(CONFIG_IPV6)
97 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
98 						   const struct in6_addr *dst,
99 						   __u16 vid)
100 {
101 	struct br_ip br_dst;
102 
103 	memset(&br_dst, 0, sizeof(br_dst));
104 	br_dst.u.ip6 = *dst;
105 	br_dst.proto = htons(ETH_P_IPV6);
106 	br_dst.vid = vid;
107 
108 	return br_mdb_ip_get(br, &br_dst);
109 }
110 #endif
111 
112 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
113 					struct sk_buff *skb, u16 vid)
114 {
115 	struct br_ip ip;
116 
117 	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
118 		return NULL;
119 
120 	if (BR_INPUT_SKB_CB(skb)->igmp)
121 		return NULL;
122 
123 	memset(&ip, 0, sizeof(ip));
124 	ip.proto = skb->protocol;
125 	ip.vid = vid;
126 
127 	switch (skb->protocol) {
128 	case htons(ETH_P_IP):
129 		ip.u.ip4 = ip_hdr(skb)->daddr;
130 		break;
131 #if IS_ENABLED(CONFIG_IPV6)
132 	case htons(ETH_P_IPV6):
133 		ip.u.ip6 = ipv6_hdr(skb)->daddr;
134 		break;
135 #endif
136 	default:
137 		return NULL;
138 	}
139 
140 	return br_mdb_ip_get_rcu(br, &ip);
141 }
142 
/* Deferred destructor for an mdb entry, run from the gc work via
 * mcast_gc.destroy.  The entry has already been unhashed
 * (br_multicast_del_mdb_entry), so only the timer and the memory remain.
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	/* entry must be unlinked and have no port groups left */
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	/* RCU-delayed free so concurrent readers can finish */
	kfree_rcu(mp, rcu);
}
154 
/* Unlink an mdb entry from the hash table and the bridge mdb list, then
 * queue it on the gc list for deferred destruction by mcast_gc_work
 * (br_multicast_destroy_mdb_entry).  Callers in this file hold
 * br->multicast_lock while doing this.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
165 
/* Membership timer for an mdb entry: when it fires, drop the host join
 * and, if no port groups remain, delete the entry.  A pending timer means
 * the entry was refreshed after this expiry was scheduled, so do nothing.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	/* ports still reference the group - keep the entry alive */
	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}
183 
/* Deferred destructor for a group source entry, run from the gc work via
 * mcast_gc.destroy after br_multicast_del_group_src() unlinked it.
 */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	/* RCU-delayed free so concurrent readers can finish */
	kfree_rcu(src, rcu);
}
194 
/* Unlink a source entry from its port group's src_list, drop the group's
 * source count and queue the entry for deferred destruction
 * (br_multicast_destroy_group_src) via the bridge gc work.
 */
static void br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
204 
/* Deferred destructor for a port group, run from the gc work via
 * mcast_gc.destroy.  By now the group must be unlinked and all its
 * sources already deleted (see br_multicast_del_pg).
 */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	/* stop both timers before freeing; rexmit first, then group timer */
	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}
217 
/* Remove a port group from its mdb entry's port list.
 *
 * @mp: mdb entry the group belongs to
 * @pg: the port group being removed
 * @pp: link in mp's port list that currently points at @pg
 *
 * Unlinks @pg (RCU-safe), deletes all of its source entries, notifies
 * userspace via RTM_DELMDB and queues @pg for deferred destruction.  If
 * the mdb entry is now completely unused (no ports, no host join), its
 * timer is fired immediately so br_multicast_group_expired() cleans it up.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	/* last user gone: expire the mdb entry right away */
	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
237 
/* Locate @pg's link inside its mdb entry's port list and delete it via
 * br_multicast_del_pg().  Both the entry and the link are expected to
 * exist; the WARN_ONs flag an inconsistent mdb if they don't.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->addr);
	if (WARN_ON(!mp))
		return;

	/* walk the port list to find the predecessor link of pg */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	/* pg was not on its own mdb entry's list - should not happen */
	WARN_ON(1);
}
261 
/* Group membership timer for a port group.  On expiry the group falls
 * back to INCLUDE mode and drops all sources whose timers have already
 * stopped; if no sources remain the whole port group is deleted,
 * otherwise a RTM_NEWMDB notification is sent when anything changed.
 * Permanent entries and refreshed timers are left alone.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	/* switching away from EXCLUDE is itself a reportable change */
	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->addr);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}
296 
297 static void br_multicast_gc(struct hlist_head *head)
298 {
299 	struct net_bridge_mcast_gc *gcent;
300 	struct hlist_node *tmp;
301 
302 	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
303 		hlist_del_init(&gcent->gc_node);
304 		gcent->destroy(gcent);
305 	}
306 }
307 
/* Allocate and build an IGMP query skb on behalf of the bridge.
 *
 * @br:          bridge generating the query
 * @pg:          port group the query targets, or NULL for a general query
 * @ip_dst:      IPv4 destination address for the packet
 * @group:       queried group (0 for a general query)
 * @with_srcs:   build a group-and-source-specific query (IGMPv3 only)
 * @over_lmqt:   select sources whose timers expire after (true) or before
 *               (false) the last member query time; only used @with_srcs
 * @sflag:       IGMPv3 "suppress router-side processing" flag value
 * @igmp_type:   out: IGMP message type placed in the packet
 * @need_rexmit: out: set true if an included source still has
 *               retransmissions left after this one
 *
 * Returns the skb, or NULL when allocation fails, no source matches the
 * lmqt selection, or the packet would exceed the port/bridge MTU.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	/* size the IGMP header; v3 queries may carry a source list */
	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* first pass: count sources matching the lmqt side
			 * that still have retransmissions pending
			 */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + ip (with 4 bytes of Router Alert option) + igmp */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	/* derive the multicast MAC destination from the IP destination */
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24-byte header: 20 bytes + Router Alert option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option (RFC 2113) directly after the base header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* IP header incl. option, matches ihl = 6 */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* max response time: LMQI for group queries, QRI otherwise */
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in the sources counted above and
		 * consume one retransmission credit per included source
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.u.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* both passes must have selected the same sources */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	/* only versions 2 and 3 are handled above */
	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	/* leave skb->data pointing at the network header for the caller */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
451 
#if IS_ENABLED(CONFIG_IPV6)
/* Allocate and build an MLD query skb on behalf of the bridge; the IPv6
 * counterpart of br_ip4_multicast_alloc_query().
 *
 * @br:          bridge generating the query
 * @pg:          port group the query targets, or NULL for a general query
 * @ip6_dst:     IPv6 destination address for the packet
 * @group:       queried group (the unspecified address for general query)
 * @with_srcs:   build a group-and-source-specific query (MLDv2 only)
 * @over_llqt:   select sources whose timers expire after (true) or before
 *               (false) the last listener query time; only used @with_srcs
 * @sflag:       MLDv2 "suppress router-side processing" flag value
 * @igmp_type:   out: ICMPv6 message type placed in the packet
 * @need_rexmit: out: set true if an included source still has
 *               retransmissions left after this one
 *
 * Returns the skb, or NULL on allocation failure, no matching sources,
 * MTU overflow, or when no usable IPv6 source address exists (in which
 * case BROPT_HAS_IPV6_ADDR is also cleared).
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	/* size the MLD header; v2 queries may carry a source list */
	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* first pass: count sources matching the llqt side
			 * that still have retransmissions pending
			 */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* eth + ipv6 + 8 bytes of hop-by-hop options + mld */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* version 6, zero traffic class/flow label */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* pick a source address; failing means we have no usable one */
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	/* max response: QRI for general queries, LLQI for specific ones */
	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in the sources counted above and
		 * consume one retransmission credit per included source
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.u.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* both passes must have selected the same sources */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	/* only versions 1 and 2 are handled above */
	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	/* leave skb->data pointing at the network header for the caller */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
615 
/* Protocol dispatcher for query allocation: builds an IGMP or MLD query
 * depending on group->proto.  When @ip_dst is NULL a default destination
 * is used: 224.0.0.1 (all-hosts) for IPv4, ff02::1 (all-nodes) for IPv6.
 * Remaining arguments are passed through; see
 * br_ip4_multicast_alloc_query() for their meaning.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->u.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(br, pg,
						    ip4_dst, group->u.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->u.ip6;
		else
			/* ff02::1 - link-local all-nodes */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(br, pg,
						    &ip6_dst, &group->u.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}
654 
/* Find or create the mdb entry for @group.
 *
 * Returns the existing or newly inserted entry, or an ERR_PTR:
 * -E2BIG when the hash table already holds hash_max entries (multicast
 * snooping is then disabled as a safety valve), -ENOMEM on allocation
 * failure, or the rhashtable insertion error.  Allocation is GFP_ATOMIC
 * since callers hold br->multicast_lock (see br_mdb_ip_get's assertion).
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		/* make the entry visible on the bridge-wide mdb list */
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}
689 
/* Source entry timer: in INCLUDE mode an expired source is removed, and
 * if it was the group's last source the whole port group goes with it.
 * In EXCLUDE mode expired sources are kept (they describe excluded
 * traffic) - nothing is done here.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src);
		if (!hlist_empty(&pg->src_list))
			goto out;
		/* last source gone - delete the port group too */
		br_multicast_find_del_pg(br, pg);
	}
out:
	spin_unlock(&br->multicast_lock);
}
711 
712 static struct net_bridge_group_src *
713 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
714 {
715 	struct net_bridge_group_src *ent;
716 
717 	switch (ip->proto) {
718 	case htons(ETH_P_IP):
719 		hlist_for_each_entry(ent, &pg->src_list, node)
720 			if (ip->u.ip4 == ent->addr.u.ip4)
721 				return ent;
722 		break;
723 #if IS_ENABLED(CONFIG_IPV6)
724 	case htons(ETH_P_IPV6):
725 		hlist_for_each_entry(ent, &pg->src_list, node)
726 			if (!ipv6_addr_cmp(&ent->addr.u.ip6, &ip->u.ip6))
727 				return ent;
728 		break;
729 #endif
730 	}
731 
732 	return NULL;
733 }
734 
/* Allocate and link a new source entry for @pg.
 *
 * Returns NULL when the per-group source limit (PG_SRC_ENT_LIMIT) is
 * reached, when @src_ip is not a valid unicast source (zeronet/multicast
 * for IPv4, unspecified/multicast for IPv6), or on allocation failure.
 * GFP_ATOMIC because callers run under br->multicast_lock.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->u.ip4) ||
		    ipv4_is_multicast(src_ip->u.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->u.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->u.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	/* publish fully initialized entry on the group's source list */
	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
773 
/* Allocate and initialize a new port group for (@port, @group).
 *
 * @next:        current list head; the new group is chained before it
 *               (the caller later rcu_assign_pointer()s it into the list)
 * @flags:       MDB_PG_FLAGS_* for the new entry
 * @src:         unicast MAC to deliver to (BR_MULTICAST_TO_UNICAST), or
 *               NULL, in which case the broadcast address marks "no host"
 * @filter_mode: initial MCAST_INCLUDE/MCAST_EXCLUDE filter mode
 *
 * Returns the new group or NULL on allocation failure (GFP_ATOMIC, as
 * callers hold br->multicast_lock).
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);
	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}
806 
807 static bool br_port_group_equal(struct net_bridge_port_group *p,
808 				struct net_bridge_port *port,
809 				const unsigned char *src)
810 {
811 	if (p->port != port)
812 		return false;
813 
814 	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
815 		return true;
816 
817 	return ether_addr_equal(src, p->eth_addr);
818 }
819 
820 void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
821 {
822 	if (!mp->host_joined) {
823 		mp->host_joined = true;
824 		if (notify)
825 			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
826 	}
827 	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
828 }
829 
830 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
831 {
832 	if (!mp->host_joined)
833 		return;
834 
835 	mp->host_joined = false;
836 	if (notify)
837 		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
838 }
839 
/* Record a membership report for @group arriving on @port (or on the
 * bridge itself when @port is NULL).
 *
 * @src:          reporting host's MAC (for multicast-to-unicast matching)
 * @filter_mode:  initial filter mode for a newly created port group
 * @igmpv2_mldv1: report came from an IGMPv2/MLDv1 host, which drives the
 *                group timer (v3/v2 membership is timed per source)
 *
 * Returns 0 on success or a negative errno from group creation.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		/* report from the bridge device itself */
		br_multicast_host_join(mp, true);
		goto out;
	}

	/* port list is kept sorted by descending port pointer; stop at
	 * the insertion point or at an existing matching entry
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

found:
	/* only v2/v1 reports refresh the whole-group timer */
	if (igmpv2_mldv1)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}
894 
895 static int br_ip4_multicast_add_group(struct net_bridge *br,
896 				      struct net_bridge_port *port,
897 				      __be32 group,
898 				      __u16 vid,
899 				      const unsigned char *src,
900 				      bool igmpv2)
901 {
902 	struct br_ip br_group;
903 	u8 filter_mode;
904 
905 	if (ipv4_is_local_multicast(group))
906 		return 0;
907 
908 	memset(&br_group, 0, sizeof(br_group));
909 	br_group.u.ip4 = group;
910 	br_group.proto = htons(ETH_P_IP);
911 	br_group.vid = vid;
912 	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
913 
914 	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
915 				      igmpv2);
916 }
917 
918 #if IS_ENABLED(CONFIG_IPV6)
919 static int br_ip6_multicast_add_group(struct net_bridge *br,
920 				      struct net_bridge_port *port,
921 				      const struct in6_addr *group,
922 				      __u16 vid,
923 				      const unsigned char *src,
924 				      bool mldv1)
925 {
926 	struct br_ip br_group;
927 	u8 filter_mode;
928 
929 	if (ipv6_addr_is_ll_all_nodes(group))
930 		return 0;
931 
932 	memset(&br_group, 0, sizeof(br_group));
933 	br_group.u.ip6 = *group;
934 	br_group.proto = htons(ETH_P_IPV6);
935 	br_group.vid = vid;
936 	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
937 
938 	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
939 				      mldv1);
940 }
941 #endif
942 
/* Per-port multicast router timer: when it expires, remove the port from
 * the router list unless its router mode is static (disabled/permanent)
 * or the timer has meanwhile been re-armed.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
			from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}
959 
/* Propagate the bridge's multicast-router state to offloading hardware
 * via a deferred switchdev attribute notification.
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}
972 
/* Bridge-level multicast router timer: when it expires, tell switchdev
 * the bridge is no longer a multicast router - unless the router mode is
 * static (disabled/permanent) or the timer was re-armed meanwhile.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}
987 
/* The other (foreign) querier's presence timer ran out: take over by
 * starting our own querier, provided the bridge is up and snooping is
 * enabled.
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}
1000 
/* IPv4 other-querier timer callback. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}
1007 
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 other-querier timer callback. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif
1016 
/* Record the source address of our own outgoing query as the currently
 * selected querier address for the matching protocol family.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
1028 
/* Build and emit one query.  With a @port the query is transmitted out
 * of that port through the bridge output hook; without one it is looped
 * back into the local stack (netif_rx) as the bridge's own query, after
 * recording our querier address.
 *
 * When sending a source-specific query with the suppress flag set (i.e.
 * for sources over the last member query time), a second query is sent
 * for the remaining under-lmqt sources - hence the goto-based retry with
 * over_lmqt flipped to false.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	/* start with the over-lmqt (suppressed) sources when sflag is set */
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(br, group, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1068 
/* Send a general query for the family that @own_query belongs to, unless
 * another querier is currently active on the segment (its other_query
 * timer is still pending), then re-arm our own query timer - using the
 * startup interval until startup_sent reaches the configured count.
 *
 * NOTE(review): only br_group.u is zeroed and .proto set; .vid is left
 * uninitialized, which is harmless on this path since
 * br_multicast_alloc_query() reads only ->proto and ->u - but confirm
 * before adding any vid-sensitive handling downstream.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	/* which family does this own_query belong to? */
	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* defer to a foreign querier that is still alive */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
				  NULL);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
1107 
1108 static void
1109 br_multicast_port_query_expired(struct net_bridge_port *port,
1110 				struct bridge_mcast_own_query *query)
1111 {
1112 	struct net_bridge *br = port->br;
1113 
1114 	spin_lock(&br->multicast_lock);
1115 	if (port->state == BR_STATE_DISABLED ||
1116 	    port->state == BR_STATE_BLOCKING)
1117 		goto out;
1118 
1119 	if (query->startup_sent < br->multicast_startup_query_count)
1120 		query->startup_sent++;
1121 
1122 	br_multicast_send_query(port->br, port, query);
1123 
1124 out:
1125 	spin_unlock(&br->multicast_lock);
1126 }
1127 
1128 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
1129 {
1130 	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
1131 
1132 	br_multicast_port_query_expired(port, &port->ip4_own_query);
1133 }
1134 
1135 #if IS_ENABLED(CONFIG_IPV6)
1136 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
1137 {
1138 	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
1139 
1140 	br_multicast_port_query_expired(port, &port->ip6_own_query);
1141 }
1142 #endif
1143 
/* Retransmit timer for a port group: resend pending group specific and
 * group-and-source specific queries while we are the active querier, and
 * re-arm the timer while retransmissions remain outstanding.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->port->br;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	/* bail out if the group is going away or querying is disabled */
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	if (pg->addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	/* another querier on the segment is responsible for queries */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	/* group specific query with suppress flag, budgeted by
	 * grp_query_rexmit_cnt
	 */
	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(br, pg->port, pg, &pg->addr,
					  &pg->addr, false, 1, NULL);
	}
	/* group-and-source specific query; need_rexmit is set when sources
	 * still have retransmissions pending
	 */
	__br_multicast_send_query(br, pg->port, pg, &pg->addr,
				  &pg->addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     br->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
1181 
1182 static void br_mc_disabled_update(struct net_device *dev, bool value)
1183 {
1184 	struct switchdev_attr attr = {
1185 		.orig_dev = dev,
1186 		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1187 		.flags = SWITCHDEV_F_DEFER,
1188 		.u.mc_disabled = !value,
1189 	};
1190 
1191 	switchdev_port_attr_set(dev, &attr);
1192 }
1193 
1194 int br_multicast_add_port(struct net_bridge_port *port)
1195 {
1196 	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1197 
1198 	timer_setup(&port->multicast_router_timer,
1199 		    br_multicast_router_expired, 0);
1200 	timer_setup(&port->ip4_own_query.timer,
1201 		    br_ip4_multicast_port_query_expired, 0);
1202 #if IS_ENABLED(CONFIG_IPV6)
1203 	timer_setup(&port->ip6_own_query.timer,
1204 		    br_ip6_multicast_port_query_expired, 0);
1205 #endif
1206 	br_mc_disabled_update(port->dev,
1207 			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
1208 
1209 	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
1210 	if (!port->mcast_stats)
1211 		return -ENOMEM;
1212 
1213 	return 0;
1214 }
1215 
/* Tear down per-port multicast state when a port leaves the bridge:
 * delete all remaining port groups, run the deferred GC for them outside
 * the lock, then stop the router timer and free the stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	/* steal the GC list so the entries can be freed without the lock */
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	/* _sync: the timer callback must not run past this point */
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}
1233 
/* Restart an own-query cycle: reset the startup counter and, if the timer
 * could be (or already was) stopped, fire it immediately.  The
 * try_to_del_timer_sync()/del_timer() pair avoids rearming a timer whose
 * callback is currently running on another CPU.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
1242 
1243 static void __br_multicast_enable_port(struct net_bridge_port *port)
1244 {
1245 	struct net_bridge *br = port->br;
1246 
1247 	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
1248 		return;
1249 
1250 	br_multicast_enable(&port->ip4_own_query);
1251 #if IS_ENABLED(CONFIG_IPV6)
1252 	br_multicast_enable(&port->ip6_own_query);
1253 #endif
1254 	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
1255 	    hlist_unhashed(&port->rlist))
1256 		br_multicast_add_router(br, port);
1257 }
1258 
1259 void br_multicast_enable_port(struct net_bridge_port *port)
1260 {
1261 	struct net_bridge *br = port->br;
1262 
1263 	spin_lock(&br->multicast_lock);
1264 	__br_multicast_enable_port(port);
1265 	spin_unlock(&br->multicast_lock);
1266 }
1267 
/* Disable multicast processing on @port: drop all non-permanent groups,
 * remove it from the router list and stop its timers.  Uses plain
 * del_timer() under the lock; the callbacks take the same lock so they
 * cannot race with the teardown here.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_find_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
1288 
1289 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1290 {
1291 	struct net_bridge_group_src *ent;
1292 	struct hlist_node *tmp;
1293 	int deleted = 0;
1294 
1295 	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1296 		if (ent->flags & BR_SGRP_F_DELETE) {
1297 			br_multicast_del_group_src(ent);
1298 			deleted++;
1299 		}
1300 
1301 	return deleted;
1302 }
1303 
/* Send a group-and-source specific query for all sources of @pg marked
 * BR_SGRP_F_SEND, lowering their timers to the last-member query time, and
 * schedule the needed retransmissions.  Only acts as querier when no other
 * querier is active on the segment.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->port->br;
	u32 lmqc = br->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	/* last-member query time: queried sources get their timers lowered
	 * to this deadline and (when we are the querier) a retransmit budget
	 */
	lmqt = now + br_multicast_lmqt(br);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				mod_timer(&ent->timer, lmqt);
			}
		}
	}

	if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	/* first transmission; sflag=1 suppresses router-side processing */
	__br_multicast_send_query(br, pg->port, pg, &pg->addr,
				  &pg->addr, true, 1, NULL);

	/* make sure the rexmit timer fires within one last-member interval */
	lmi = now + br->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
1349 
/* Send a group specific query for @pg (when we are the active querier),
 * set up its retransmit budget, and for EXCLUDE mode lower the group timer
 * to the last-member query time.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->port->br;
	unsigned long now = jiffies, lmi;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + br->multicast_last_member_interval;
		/* one query is sent here, the rest by the rexmit timer */
		pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
		__br_multicast_send_query(br, pg->port, pg, &pg->addr,
					  &pg->addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	/* EXCLUDE groups expire after LMQT unless members answer the query */
	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(br));
}
1383 
1384 /* State          Msg type      New state                Actions
1385  * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)            (B)=GMI
1386  * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)            (B)=GMI
1387  * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
1388  */
1389 static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
1390 				     void *srcs, u32 nsrcs, size_t src_size)
1391 {
1392 	struct net_bridge *br = pg->port->br;
1393 	struct net_bridge_group_src *ent;
1394 	unsigned long now = jiffies;
1395 	bool changed = false;
1396 	struct br_ip src_ip;
1397 	u32 src_idx;
1398 
1399 	memset(&src_ip, 0, sizeof(src_ip));
1400 	src_ip.proto = pg->addr.proto;
1401 	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1402 		memcpy(&src_ip.u, srcs, src_size);
1403 		ent = br_multicast_find_group_src(pg, &src_ip);
1404 		if (!ent) {
1405 			ent = br_multicast_new_group_src(pg, &src_ip);
1406 			if (ent)
1407 				changed = true;
1408 		}
1409 
1410 		if (ent)
1411 			mod_timer(&ent->timer, now + br_multicast_gmi(br));
1412 		srcs += src_size;
1413 	}
1414 
1415 	return changed;
1416 }
1417 
1418 /* State          Msg type      New state                Actions
1419  * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
1420  *                                                       Delete (A-B)
1421  *                                                       Group Timer=GMI
1422  */
1423 static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
1424 				 void *srcs, u32 nsrcs, size_t src_size)
1425 {
1426 	struct net_bridge_group_src *ent;
1427 	struct br_ip src_ip;
1428 	u32 src_idx;
1429 
1430 	hlist_for_each_entry(ent, &pg->src_list, node)
1431 		ent->flags |= BR_SGRP_F_DELETE;
1432 
1433 	memset(&src_ip, 0, sizeof(src_ip));
1434 	src_ip.proto = pg->addr.proto;
1435 	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1436 		memcpy(&src_ip.u, srcs, src_size);
1437 		ent = br_multicast_find_group_src(pg, &src_ip);
1438 		if (ent)
1439 			ent->flags &= ~BR_SGRP_F_DELETE;
1440 		else
1441 			br_multicast_new_group_src(pg, &src_ip);
1442 		srcs += src_size;
1443 	}
1444 
1445 	__grp_src_delete_marked(pg);
1446 }
1447 
1448 /* State          Msg type      New state                Actions
1449  * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=GMI
1450  *                                                       Delete (X-A)
1451  *                                                       Delete (Y-A)
1452  *                                                       Group Timer=GMI
1453  */
1454 static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
1455 				 void *srcs, u32 nsrcs, size_t src_size)
1456 {
1457 	struct net_bridge *br = pg->port->br;
1458 	struct net_bridge_group_src *ent;
1459 	unsigned long now = jiffies;
1460 	bool changed = false;
1461 	struct br_ip src_ip;
1462 	u32 src_idx;
1463 
1464 	hlist_for_each_entry(ent, &pg->src_list, node)
1465 		ent->flags |= BR_SGRP_F_DELETE;
1466 
1467 	memset(&src_ip, 0, sizeof(src_ip));
1468 	src_ip.proto = pg->addr.proto;
1469 	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1470 		memcpy(&src_ip.u, srcs, src_size);
1471 		ent = br_multicast_find_group_src(pg, &src_ip);
1472 		if (ent) {
1473 			ent->flags &= ~BR_SGRP_F_DELETE;
1474 		} else {
1475 			ent = br_multicast_new_group_src(pg, &src_ip);
1476 			if (ent) {
1477 				mod_timer(&ent->timer,
1478 					  now + br_multicast_gmi(br));
1479 				changed = true;
1480 			}
1481 		}
1482 		srcs += src_size;
1483 	}
1484 
1485 	if (__grp_src_delete_marked(pg))
1486 		changed = true;
1487 
1488 	return changed;
1489 }
1490 
1491 static bool br_multicast_isexc(struct net_bridge_port_group *pg,
1492 			       void *srcs, u32 nsrcs, size_t src_size)
1493 {
1494 	struct net_bridge *br = pg->port->br;
1495 	bool changed = false;
1496 
1497 	switch (pg->filter_mode) {
1498 	case MCAST_INCLUDE:
1499 		__grp_src_isexc_incl(pg, srcs, nsrcs, src_size);
1500 		changed = true;
1501 		break;
1502 	case MCAST_EXCLUDE:
1503 		changed = __grp_src_isexc_excl(pg, srcs, nsrcs, src_size);
1504 		break;
1505 	}
1506 
1507 	pg->filter_mode = MCAST_EXCLUDE;
1508 	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
1509 
1510 	return changed;
1511 }
1512 
1513 /* State          Msg type      New state                Actions
1514  * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)            (B)=GMI
1515  *                                                       Send Q(G,A-B)
1516  */
1517 static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
1518 				void *srcs, u32 nsrcs, size_t src_size)
1519 {
1520 	struct net_bridge *br = pg->port->br;
1521 	u32 src_idx, to_send = pg->src_ents;
1522 	struct net_bridge_group_src *ent;
1523 	unsigned long now = jiffies;
1524 	bool changed = false;
1525 	struct br_ip src_ip;
1526 
1527 	hlist_for_each_entry(ent, &pg->src_list, node)
1528 		ent->flags |= BR_SGRP_F_SEND;
1529 
1530 	memset(&src_ip, 0, sizeof(src_ip));
1531 	src_ip.proto = pg->addr.proto;
1532 	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1533 		memcpy(&src_ip.u, srcs, src_size);
1534 		ent = br_multicast_find_group_src(pg, &src_ip);
1535 		if (ent) {
1536 			ent->flags &= ~BR_SGRP_F_SEND;
1537 			to_send--;
1538 		} else {
1539 			ent = br_multicast_new_group_src(pg, &src_ip);
1540 			if (ent)
1541 				changed = true;
1542 		}
1543 		if (ent)
1544 			mod_timer(&ent->timer, now + br_multicast_gmi(br));
1545 		srcs += src_size;
1546 	}
1547 
1548 	if (to_send)
1549 		__grp_src_query_marked_and_rexmit(pg);
1550 
1551 	return changed;
1552 }
1553 
1554 /* State          Msg type      New state                Actions
1555  * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
1556  *                                                       Send Q(G,X-A)
1557  *                                                       Send Q(G)
1558  */
/* EXCLUDE(X,Y) + TO_IN(A) -> EXCLUDE(X+A,Y-A), (A)=GMI, Send Q(G,X-A)
 * and Q(G).  Only sources with running timers (the X set) are candidates
 * for the source-specific query.  Returns true when the set changed.
 */
static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->port->br;
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* flag the X set (running timers); reported ones are unflagged below */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.u, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		/* reported sources get the group membership interval */
		if (ent)
			mod_timer(&ent->timer, now + br_multicast_gmi(br));
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	/* TO_IN in EXCLUDE mode also triggers a group specific query */
	__grp_send_query_and_rexmit(pg);

	return changed;
}
1600 
1601 static bool br_multicast_toin(struct net_bridge_port_group *pg,
1602 			      void *srcs, u32 nsrcs, size_t src_size)
1603 {
1604 	bool changed = false;
1605 
1606 	switch (pg->filter_mode) {
1607 	case MCAST_INCLUDE:
1608 		changed = __grp_src_toin_incl(pg, srcs, nsrcs, src_size);
1609 		break;
1610 	case MCAST_EXCLUDE:
1611 		changed = __grp_src_toin_excl(pg, srcs, nsrcs, src_size);
1612 		break;
1613 	}
1614 
1615 	return changed;
1616 }
1617 
1618 /* State          Msg type      New state                Actions
1619  * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
1620  *                                                       Delete (A-B)
1621  *                                                       Send Q(G,A*B)
1622  *                                                       Group Timer=GMI
1623  */
1624 static void __grp_src_toex_incl(struct net_bridge_port_group *pg,
1625 				void *srcs, u32 nsrcs, size_t src_size)
1626 {
1627 	struct net_bridge_group_src *ent;
1628 	u32 src_idx, to_send = 0;
1629 	struct br_ip src_ip;
1630 
1631 	hlist_for_each_entry(ent, &pg->src_list, node)
1632 		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
1633 
1634 	memset(&src_ip, 0, sizeof(src_ip));
1635 	src_ip.proto = pg->addr.proto;
1636 	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1637 		memcpy(&src_ip.u, srcs, src_size);
1638 		ent = br_multicast_find_group_src(pg, &src_ip);
1639 		if (ent) {
1640 			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
1641 				     BR_SGRP_F_SEND;
1642 			to_send++;
1643 		} else {
1644 			br_multicast_new_group_src(pg, &src_ip);
1645 		}
1646 		srcs += src_size;
1647 	}
1648 
1649 	__grp_src_delete_marked(pg);
1650 	if (to_send)
1651 		__grp_src_query_marked_and_rexmit(pg);
1652 }
1653 
1654 /* State          Msg type      New state                Actions
1655  * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=Group Timer
1656  *                                                       Delete (X-A)
1657  *                                                       Delete (Y-A)
1658  *                                                       Send Q(G,A-Y)
1659  *                                                       Group Timer=GMI
1660  */
/* EXCLUDE(X,Y) + TO_EX(A) -> EXCLUDE(A-Y, Y*A): sources not reported are
 * deleted, new ones inherit the group timer, and reported sources with
 * running timers get a source-specific query.  Returns true when the set
 * changed.
 */
static bool __grp_src_toex_excl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* mark everything for deletion, clear any stale send flags */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.u, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			/* (A-X-Y) inherits the group timer per the RFC */
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				mod_timer(&ent->timer, pg->timer.expires);
				changed = true;
			}
		}
		/* Q(G,A-Y): only sources with running timers are queried */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
		srcs += src_size;
	}

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}
1700 
1701 static bool br_multicast_toex(struct net_bridge_port_group *pg,
1702 			      void *srcs, u32 nsrcs, size_t src_size)
1703 {
1704 	struct net_bridge *br = pg->port->br;
1705 	bool changed = false;
1706 
1707 	switch (pg->filter_mode) {
1708 	case MCAST_INCLUDE:
1709 		__grp_src_toex_incl(pg, srcs, nsrcs, src_size);
1710 		changed = true;
1711 		break;
1712 	case MCAST_EXCLUDE:
1713 		__grp_src_toex_excl(pg, srcs, nsrcs, src_size);
1714 		break;
1715 	}
1716 
1717 	pg->filter_mode = MCAST_EXCLUDE;
1718 	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
1719 
1720 	return changed;
1721 }
1722 
1723 /* State          Msg type      New state                Actions
1724  * INCLUDE (A)    BLOCK (B)     INCLUDE (A)              Send Q(G,A*B)
1725  */
1726 static void __grp_src_block_incl(struct net_bridge_port_group *pg,
1727 				 void *srcs, u32 nsrcs, size_t src_size)
1728 {
1729 	struct net_bridge_group_src *ent;
1730 	u32 src_idx, to_send = 0;
1731 	struct br_ip src_ip;
1732 
1733 	hlist_for_each_entry(ent, &pg->src_list, node)
1734 		ent->flags &= ~BR_SGRP_F_SEND;
1735 
1736 	memset(&src_ip, 0, sizeof(src_ip));
1737 	src_ip.proto = pg->addr.proto;
1738 	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1739 		memcpy(&src_ip.u, srcs, src_size);
1740 		ent = br_multicast_find_group_src(pg, &src_ip);
1741 		if (ent) {
1742 			ent->flags |= BR_SGRP_F_SEND;
1743 			to_send++;
1744 		}
1745 		srcs += src_size;
1746 	}
1747 
1748 	if (to_send)
1749 		__grp_src_query_marked_and_rexmit(pg);
1750 
1751 	if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list))
1752 		br_multicast_find_del_pg(pg->port->br, pg);
1753 }
1754 
1755 /* State          Msg type      New state                Actions
1756  * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)      (A-X-Y)=Group Timer
1757  *                                                       Send Q(G,A-Y)
1758  */
/* EXCLUDE(X,Y) + BLOCK(A) -> EXCLUDE(X+(A-Y),Y): reported sources missing
 * from the set are added with the group timer; reported sources with
 * running timers get a source-specific query.  Returns true when the set
 * changed.
 */
static bool __grp_src_block_excl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.u, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			/* (A-X-Y) inherits the group timer per the RFC */
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				mod_timer(&ent->timer, pg->timer.expires);
				changed = true;
			}
		}
		/* Q(G,A-Y): only sources with running timers are queried */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}
1794 
1795 static bool br_multicast_block(struct net_bridge_port_group *pg,
1796 			       void *srcs, u32 nsrcs, size_t src_size)
1797 {
1798 	bool changed = false;
1799 
1800 	switch (pg->filter_mode) {
1801 	case MCAST_INCLUDE:
1802 		__grp_src_block_incl(pg, srcs, nsrcs, src_size);
1803 		break;
1804 	case MCAST_EXCLUDE:
1805 		changed = __grp_src_block_excl(pg, srcs, nsrcs, src_size);
1806 		break;
1807 	}
1808 
1809 	return changed;
1810 }
1811 
1812 static struct net_bridge_port_group *
1813 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
1814 		       struct net_bridge_port *p,
1815 		       const unsigned char *src)
1816 {
1817 	struct net_bridge *br __maybe_unused = mp->br;
1818 	struct net_bridge_port_group *pg;
1819 
1820 	for (pg = mlock_dereference(mp->ports, br);
1821 	     pg;
1822 	     pg = mlock_dereference(pg->next, br))
1823 		if (br_port_group_equal(pg, p, src))
1824 			return pg;
1825 
1826 	return NULL;
1827 }
1828 
/* Parse an IGMPv3 membership report and update the MDB accordingly.
 * Each group record is validated with ip_mc_may_pull() before access.
 * When the bridge is forced to IGMPv2 mode (or the report came from the
 * bridge itself, port == NULL) records are degraded to plain join/leave;
 * otherwise the full v3 source-list state machine is run per record.
 * Returns 0 on success or a negative errno on malformed input.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = br->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	bool changed = false;
	__be32 group;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	/* len tracks how far into the packet we have validated so far */
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* 4 bytes per IPv4 source address */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* an empty INCLUDE set means "leave" in v2 terms */
			if (!port || igmpv2) {
				br_ip4_multicast_leave_group(br, port, group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src, igmpv2);
			if (err)
				break;
		}

		/* source-list processing only applies to real ports in
		 * IGMPv3 mode
		 */
		if (!port || igmpv2)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip4_get(br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, grec->grec_src,
							   nsrcs, sizeof(__be32));
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
							   sizeof(__be32));
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
						     sizeof(__be32));
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
						    sizeof(__be32));
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
						    sizeof(__be32));
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, grec->grec_src, nsrcs,
						     sizeof(__be32));
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}
1938 
1939 #if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 membership report and update the MDB accordingly.
 * Mirrors br_ip4_multicast_igmp3_report(): each record's nsrcs field is
 * read via skb_header_pointer() before the full record is pulled.  When
 * the bridge is forced to MLDv1 mode (or port == NULL) records degrade to
 * plain join/leave; otherwise the full source-list state machine runs.
 * Returns 0 on success or a negative errno on malformed input.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = br->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	/* number of group records lives in the ICMPv6 "reserved" data */
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* make sure grec_nsrcs lies within the IPv6 payload */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* header plus nsrcs in6_addr entries */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* an empty INCLUDE set means "done" in v1 terms */
			if (!port || mldv1) {
				br_ip6_multicast_leave_group(br, port,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* source-list processing only applies to real ports in
		 * MLDv2 mode
		 */
		if (!port || mldv1)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, grec->grec_src,
							   nsrcs,
							   sizeof(struct in6_addr));
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
							   sizeof(struct in6_addr));
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
						     sizeof(struct in6_addr));
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
						    sizeof(struct in6_addr));
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
						    sizeof(struct in6_addr));
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, grec->grec_src, nsrcs,
						     sizeof(struct in6_addr));
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}
2064 #endif
2065 
2066 static bool br_ip4_multicast_select_querier(struct net_bridge *br,
2067 					    struct net_bridge_port *port,
2068 					    __be32 saddr)
2069 {
2070 	if (!timer_pending(&br->ip4_own_query.timer) &&
2071 	    !timer_pending(&br->ip4_other_query.timer))
2072 		goto update;
2073 
2074 	if (!br->ip4_querier.addr.u.ip4)
2075 		goto update;
2076 
2077 	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
2078 		goto update;
2079 
2080 	return false;
2081 
2082 update:
2083 	br->ip4_querier.addr.u.ip4 = saddr;
2084 
2085 	/* update protected by general multicast_lock by caller */
2086 	rcu_assign_pointer(br->ip4_querier.port, port);
2087 
2088 	return true;
2089 }
2090 
2091 #if IS_ENABLED(CONFIG_IPV6)
2092 static bool br_ip6_multicast_select_querier(struct net_bridge *br,
2093 					    struct net_bridge_port *port,
2094 					    struct in6_addr *saddr)
2095 {
2096 	if (!timer_pending(&br->ip6_own_query.timer) &&
2097 	    !timer_pending(&br->ip6_other_query.timer))
2098 		goto update;
2099 
2100 	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
2101 		goto update;
2102 
2103 	return false;
2104 
2105 update:
2106 	br->ip6_querier.addr.u.ip6 = *saddr;
2107 
2108 	/* update protected by general multicast_lock by caller */
2109 	rcu_assign_pointer(br->ip6_querier.port, port);
2110 
2111 	return true;
2112 }
2113 #endif
2114 
2115 static bool br_multicast_select_querier(struct net_bridge *br,
2116 					struct net_bridge_port *port,
2117 					struct br_ip *saddr)
2118 {
2119 	switch (saddr->proto) {
2120 	case htons(ETH_P_IP):
2121 		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
2122 #if IS_ENABLED(CONFIG_IPV6)
2123 	case htons(ETH_P_IPV6):
2124 		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
2125 #endif
2126 	}
2127 
2128 	return false;
2129 }
2130 
/* Another querier was heard: (re)arm the "other querier present" timer.
 * If the timer was not already running, also record the earliest point
 * (now + @max_delay) at which we may consider taking over as querier.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
2141 
2142 static void br_port_mc_router_state_change(struct net_bridge_port *p,
2143 					   bool is_mc_router)
2144 {
2145 	struct switchdev_attr attr = {
2146 		.orig_dev = p->dev,
2147 		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
2148 		.flags = SWITCHDEV_F_DEFER,
2149 		.u.mrouter = is_mc_router,
2150 	};
2151 
2152 	switchdev_port_attr_set(p->dev, &attr);
2153 }
2154 
2155 /*
2156  * Add port to router_list
2157  *  list is maintained ordered by pointer value
2158  *  and locked by br->multicast_lock and RCU
2159  */
2160 static void br_multicast_add_router(struct net_bridge *br,
2161 				    struct net_bridge_port *port)
2162 {
2163 	struct net_bridge_port *p;
2164 	struct hlist_node *slot = NULL;
2165 
2166 	if (!hlist_unhashed(&port->rlist))
2167 		return;
2168 
2169 	hlist_for_each_entry(p, &br->router_list, rlist) {
2170 		if ((unsigned long) port >= (unsigned long) p)
2171 			break;
2172 		slot = &p->rlist;
2173 	}
2174 
2175 	if (slot)
2176 		hlist_add_behind_rcu(&port->rlist, slot);
2177 	else
2178 		hlist_add_head_rcu(&port->rlist, &br->router_list);
2179 	br_rtr_notify(br->dev, port, RTM_NEWMDB);
2180 	br_port_mc_router_state_change(port, true);
2181 }
2182 
/* A multicast router was detected behind @port (or on the bridge device
 * itself when @port is NULL); record it and (re)arm the router timer.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		/* Bridge-local router: only the auto-detect mode
		 * (TEMP_QUERY) uses the timer; DISABLED/PERM are fixed
		 * by configuration and ignore dynamic signals.
		 */
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	/* Ports pinned to DISABLED or PERM do not react to detection. */
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	/* Refresh the per-port expiry for dynamically learned routers. */
	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
2207 
/* Handle an incoming general query from @saddr: if that source wins (or
 * keeps) the querier election, refresh the other-querier timer and mark
 * the originating port as leading to a multicast router.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (br_multicast_select_querier(br, port, saddr)) {
		br_multicast_update_query_timer(br, query, max_delay);
		br_multicast_mark_router(br, port);
	}
}
2220 
/* Process a received IGMP query (v1/v2/v3). For general queries we run
 * querier election; for group-specific queries we shorten the expiry of
 * the matching mdb entry and its port groups per the advertised max
 * response delay. Runs under br->multicast_lock.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2-sized query: max response time comes from the
		 * code field in 1/10 s units.
		 */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 query (code == 0): fixed 10 s response
			 * time, and treat it as a general query.
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* Ignore queries with a source list, and (in IGMPv3 mode)
		 * group-specific queries with the S (suppress) flag set.
		 */
		if (ih3->nsrcs ||
		    (br->multicast_igmp_version == 3 && group && ih3->suppress))
			goto out;

		/* IGMPv3 encodes the max response time as a float-ish code. */
		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* General query: feed the querier election machinery. */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	/* Group-specific query: shorten timers of the matching entry. */
	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* Lower (never raise) the host-joined entry's expiry to max_delay. */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): the igmp_version/filter_mode check binds to
		 * the try_to_del_timer_sync() arm of the ternary only (the
		 * && is inside the ':' branch) — presumably intentional,
		 * but worth confirming against upstream history.
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}
2299 
#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query (v1/v2). General queries feed the querier
 * election; group-specific queries shorten the expiry of the matching
 * mdb entry and its port groups. Returns 0 or -EINVAL on a truncated
 * packet. Runs under br->multicast_lock.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1-sized query. */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query. */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* In MLDv2 mode, ignore group-specific queries with the
		 * S (suppress router-side processing) flag set.
		 */
		if (br->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	/* A query for the unspecified address (::) is a general query. */
	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	/* Group-specific query: shorten timers of the matching entry. */
	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* Lower (never raise) the host-joined entry's expiry. */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): as in the IPv4 path, the mld_version/
		 * filter_mode check binds only to the ':' arm of the
		 * ternary — presumably intentional; confirm upstream.
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
2390 
/* Common IGMP/MLD leave handling: either delete the port group at once
 * (fast-leave) or shorten the relevant timers so the membership expires
 * after last_member_count * last_member_interval unless re-reported.
 * Runs under br->multicast_lock.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		/* Fast-leave: drop the matching (non-permanent) port group
		 * immediately instead of waiting for timers.
		 */
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* If another querier is active it will send the group-specific
	 * query; do not interfere with its timers.
	 */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		/* We are the querier: send a group-specific query ourselves
		 * and shorten the matching port group's expiry.
		 */
		__br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			/* Only lower the expiry, never extend it. */
			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* Host (bridge device) leave: shorten the entry timer. */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* Port leave: shorten the matching port group's timer. */
	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}
2493 
2494 static void br_ip4_multicast_leave_group(struct net_bridge *br,
2495 					 struct net_bridge_port *port,
2496 					 __be32 group,
2497 					 __u16 vid,
2498 					 const unsigned char *src)
2499 {
2500 	struct br_ip br_group;
2501 	struct bridge_mcast_own_query *own_query;
2502 
2503 	if (ipv4_is_local_multicast(group))
2504 		return;
2505 
2506 	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
2507 
2508 	memset(&br_group, 0, sizeof(br_group));
2509 	br_group.u.ip4 = group;
2510 	br_group.proto = htons(ETH_P_IP);
2511 	br_group.vid = vid;
2512 
2513 	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
2514 				 own_query, src);
2515 }
2516 
2517 #if IS_ENABLED(CONFIG_IPV6)
2518 static void br_ip6_multicast_leave_group(struct net_bridge *br,
2519 					 struct net_bridge_port *port,
2520 					 const struct in6_addr *group,
2521 					 __u16 vid,
2522 					 const unsigned char *src)
2523 {
2524 	struct br_ip br_group;
2525 	struct bridge_mcast_own_query *own_query;
2526 
2527 	if (ipv6_addr_is_ll_all_nodes(group))
2528 		return;
2529 
2530 	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
2531 
2532 	memset(&br_group, 0, sizeof(br_group));
2533 	br_group.u.ip6 = *group;
2534 	br_group.proto = htons(ETH_P_IPV6);
2535 	br_group.vid = vid;
2536 
2537 	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
2538 				 own_query, src);
2539 }
2540 #endif
2541 
2542 static void br_multicast_err_count(const struct net_bridge *br,
2543 				   const struct net_bridge_port *p,
2544 				   __be16 proto)
2545 {
2546 	struct bridge_mcast_stats __percpu *stats;
2547 	struct bridge_mcast_stats *pstats;
2548 
2549 	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2550 		return;
2551 
2552 	if (p)
2553 		stats = p->mcast_stats;
2554 	else
2555 		stats = br->mcast_stats;
2556 	if (WARN_ON(!stats))
2557 		return;
2558 
2559 	pstats = this_cpu_ptr(stats);
2560 
2561 	u64_stats_update_begin(&pstats->syncp);
2562 	switch (proto) {
2563 	case htons(ETH_P_IP):
2564 		pstats->mstats.igmp_parse_errors++;
2565 		break;
2566 #if IS_ENABLED(CONFIG_IPV6)
2567 	case htons(ETH_P_IPV6):
2568 		pstats->mstats.mld_parse_errors++;
2569 		break;
2570 #endif
2571 	}
2572 	u64_stats_update_end(&pstats->syncp);
2573 }
2574 
2575 static void br_multicast_pim(struct net_bridge *br,
2576 			     struct net_bridge_port *port,
2577 			     const struct sk_buff *skb)
2578 {
2579 	unsigned int offset = skb_transport_offset(skb);
2580 	struct pimhdr *pimhdr, _pimhdr;
2581 
2582 	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
2583 	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
2584 	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
2585 		return;
2586 
2587 	br_multicast_mark_router(br, port);
2588 }
2589 
2590 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
2591 				    struct net_bridge_port *port,
2592 				    struct sk_buff *skb)
2593 {
2594 	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
2595 	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
2596 		return -ENOMSG;
2597 
2598 	br_multicast_mark_router(br, port);
2599 
2600 	return 0;
2601 }
2602 
/* Snoop an IPv4 multicast packet: validate it as IGMP, classify the
 * message type and update group membership / router state accordingly.
 * Non-IGMP multicast is flagged mrouters_only unless link-local.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* Not an IGMP packet. Restrict non-link-local multicast to
		 * router ports; special-case PIM hellos and MRD adverts.
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		/* Malformed IGMP: count the parse error and drop. */
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Reports are only forwarded to router ports. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
						 true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
2657 
2658 #if IS_ENABLED(CONFIG_IPV6)
2659 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
2660 				    struct net_bridge_port *port,
2661 				    struct sk_buff *skb)
2662 {
2663 	int ret;
2664 
2665 	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
2666 		return -ENOMSG;
2667 
2668 	ret = ipv6_mc_check_icmpv6(skb);
2669 	if (ret < 0)
2670 		return ret;
2671 
2672 	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
2673 		return -ENOMSG;
2674 
2675 	br_multicast_mark_router(br, port);
2676 
2677 	return 0;
2678 }
2679 
/* Snoop an IPv6 multicast packet: validate it as MLD, classify the
 * message type and update group membership / router state accordingly.
 * Non-MLD multicast is flagged mrouters_only unless all-nodes.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		/* Not an MLD packet. Restrict to router ports unless it is
		 * an all-nodes packet; handle MRD adverts specially.
		 */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		/* Malformed MLD: count the parse error and drop. */
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* Reports are only forwarded to router ports. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif
2738 
2739 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
2740 		     struct sk_buff *skb, u16 vid)
2741 {
2742 	int ret = 0;
2743 
2744 	BR_INPUT_SKB_CB(skb)->igmp = 0;
2745 	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
2746 
2747 	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
2748 		return 0;
2749 
2750 	switch (skb->protocol) {
2751 	case htons(ETH_P_IP):
2752 		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
2753 		break;
2754 #if IS_ENABLED(CONFIG_IPV6)
2755 	case htons(ETH_P_IPV6):
2756 		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
2757 		break;
2758 #endif
2759 	}
2760 
2761 	return ret;
2762 }
2763 
/* Own-query timer fired: clear the elected querier port and send the
 * next (startup or periodic) query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	/* Count this one toward the faster-paced startup queries. */
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}
2776 
/* Timer trampoline for the bridge's own IGMP query timer. */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}
2783 
#if IS_ENABLED(CONFIG_IPV6)
/* Timer trampoline for the bridge's own MLD query timer. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif
2792 
2793 static void br_multicast_gc_work(struct work_struct *work)
2794 {
2795 	struct net_bridge *br = container_of(work, struct net_bridge,
2796 					     mcast_gc_work);
2797 	HLIST_HEAD(deleted_head);
2798 
2799 	spin_lock_bh(&br->multicast_lock);
2800 	hlist_move_list(&br->mcast_gc_list, &deleted_head);
2801 	spin_unlock_bh(&br->multicast_lock);
2802 
2803 	br_multicast_gc(&deleted_head);
2804 }
2805 
/* Initialize the bridge's multicast snooping state: protocol defaults,
 * intervals (per RFC 2236/3810 recommended values), timers, and the
 * mdb/gc bookkeeping structures.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	/* Snooping on by default; assume an IPv6 address until told otherwise. */
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
2849 
2850 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
2851 {
2852 	struct in_device *in_dev = in_dev_get(br->dev);
2853 
2854 	if (!in_dev)
2855 		return;
2856 
2857 	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
2858 	in_dev_put(in_dev);
2859 }
2860 
2861 #if IS_ENABLED(CONFIG_IPV6)
2862 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
2863 {
2864 	struct in6_addr addr;
2865 
2866 	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
2867 	ipv6_dev_mc_inc(br->dev, &addr);
2868 }
2869 #else
2870 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
2871 {
2872 }
2873 #endif
2874 
/* Join both IPv4 and IPv6 all-snoopers groups (MRD, RFC 4286). */
static void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
2880 
2881 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
2882 {
2883 	struct in_device *in_dev = in_dev_get(br->dev);
2884 
2885 	if (WARN_ON(!in_dev))
2886 		return;
2887 
2888 	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
2889 	in_dev_put(in_dev);
2890 }
2891 
2892 #if IS_ENABLED(CONFIG_IPV6)
2893 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
2894 {
2895 	struct in6_addr addr;
2896 
2897 	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
2898 	ipv6_dev_mc_dec(br->dev, &addr);
2899 }
2900 #else
2901 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
2902 {
2903 }
2904 #endif
2905 
/* Leave both IPv4 and IPv6 all-snoopers groups (MRD, RFC 4286). */
static void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
2911 
2912 static void __br_multicast_open(struct net_bridge *br,
2913 				struct bridge_mcast_own_query *query)
2914 {
2915 	query->startup_sent = 0;
2916 
2917 	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
2918 		return;
2919 
2920 	mod_timer(&query->timer, jiffies);
2921 }
2922 
/* Bridge device brought up: join the snoopers groups (when snooping is
 * enabled) and restart the own-query machinery for both families.
 */
void br_multicast_open(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}
2933 
/* Bridge device going down: stop all querier/router timers and leave
 * the snoopers groups joined in br_multicast_open().
 */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);
}
2947 
/* Bridge device being destroyed: delete every mdb entry, drain and
 * cancel the deferred garbage collection, and wait for in-flight RCU
 * callbacks before the bridge memory goes away.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}
2965 
/* Set the bridge device's own multicast-router mode (sysfs/netlink).
 * Returns 0 on success, -EINVAL for an unsupported mode.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		/* Fixed modes: router state follows the setting directly
		 * and the detection timer is no longer needed.
		 */
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* Auto-detect mode: drop router state until re-learned. */
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
2992 
/* Remove @p from the router list (if present) and notify userspace and
 * offloading hardware. Caller holds br->multicast_lock.
 */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
3005 
/* Set a port's multicast-router mode (sysfs/netlink). Setting the same
 * value again on a TEMP port refreshes its timer. Returns 0 on success,
 * -EINVAL for an unknown mode.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* Auto-detect: drop current state, re-learn from queries. */
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		/* Permanent router port: no expiry timer. */
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* Temporary router port: added with a running expiry. */
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
3049 
/* Restart the bridge's own querier for @query and re-enable the
 * per-port own queries on all forwarding ports.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		/* Without CONFIG_IPV6 the else arm vanishes: only the IPv4
		 * query context is ever passed in that configuration.
		 */
		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
3072 
/* Enable/disable multicast snooping on the bridge (sysfs/netlink).
 * Always returns 0; a no-op when the state is unchanged.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* Tell switchdev/offload about the change before flipping it. */
	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		br_multicast_leave_snoopers(br);
		goto unlock;
	}

	/* Enabling: nothing more to do until the device is up. */
	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
3100 
3101 bool br_multicast_enabled(const struct net_device *dev)
3102 {
3103 	struct net_bridge *br = netdev_priv(dev);
3104 
3105 	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
3106 }
3107 EXPORT_SYMBOL_GPL(br_multicast_enabled);
3108 
3109 bool br_multicast_router(const struct net_device *dev)
3110 {
3111 	struct net_bridge *br = netdev_priv(dev);
3112 	bool is_router;
3113 
3114 	spin_lock_bh(&br->multicast_lock);
3115 	is_router = br_multicast_is_router(br);
3116 	spin_unlock_bh(&br->multicast_lock);
3117 	return is_router;
3118 }
3119 EXPORT_SYMBOL_GPL(br_multicast_router);
3120 
/* Enable/disable the bridge acting as IGMP/MLD querier. When enabling,
 * start the own-query machinery for both families and set up the point
 * at which we may claim the querier role. Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	/* If no other querier is known, allow takeover after max_delay. */
	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
3154 
3155 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
3156 {
3157 	/* Currently we support only version 2 and 3 */
3158 	switch (val) {
3159 	case 2:
3160 	case 3:
3161 		break;
3162 	default:
3163 		return -EINVAL;
3164 	}
3165 
3166 	spin_lock_bh(&br->multicast_lock);
3167 	br->multicast_igmp_version = val;
3168 	spin_unlock_bh(&br->multicast_lock);
3169 
3170 	return 0;
3171 }
3172 
3173 #if IS_ENABLED(CONFIG_IPV6)
3174 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
3175 {
3176 	/* Currently we support version 1 and 2 */
3177 	switch (val) {
3178 	case 1:
3179 	case 2:
3180 		break;
3181 	default:
3182 		return -EINVAL;
3183 	}
3184 
3185 	spin_lock_bh(&br->multicast_lock);
3186 	br->multicast_mld_version = val;
3187 	spin_unlock_bh(&br->multicast_lock);
3188 
3189 	return 0;
3190 }
3191 #endif
3192 
3193 /**
3194  * br_multicast_list_adjacent - Returns snooped multicast addresses
3195  * @dev:	The bridge port adjacent to which to retrieve addresses
3196  * @br_ip_list:	The list to store found, snooped multicast IP addresses in
3197  *
3198  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
3199  * snooping feature on all bridge ports of dev's bridge device, excluding
3200  * the addresses from dev itself.
3201  *
3202  * Returns the number of items added to br_ip_list.
3203  *
3204  * Notes:
3205  * - br_ip_list needs to be initialized by caller
3206  * - br_ip_list might contain duplicates in the end
3207  *   (needs to be taken care of by caller)
3208  * - br_ip_list needs to be freed by caller
3209  */
3210 int br_multicast_list_adjacent(struct net_device *dev,
3211 			       struct list_head *br_ip_list)
3212 {
3213 	struct net_bridge *br;
3214 	struct net_bridge_port *port;
3215 	struct net_bridge_port_group *group;
3216 	struct br_ip_list *entry;
3217 	int count = 0;
3218 
3219 	rcu_read_lock();
3220 	if (!br_ip_list || !netif_is_bridge_port(dev))
3221 		goto unlock;
3222 
3223 	port = br_port_get_rcu(dev);
3224 	if (!port || !port->br)
3225 		goto unlock;
3226 
3227 	br = port->br;
3228 
3229 	list_for_each_entry_rcu(port, &br->port_list, list) {
3230 		if (!port->dev || port->dev == dev)
3231 			continue;
3232 
3233 		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
3234 			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
3235 			if (!entry)
3236 				goto unlock;
3237 
3238 			entry->addr = group->addr;
3239 			list_add(&entry->list, br_ip_list);
3240 			count++;
3241 		}
3242 	}
3243 
3244 unlock:
3245 	rcu_read_unlock();
3246 	return count;
3247 }
3248 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
3249 
3250 /**
3251  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
3252  * @dev: The bridge port providing the bridge on which to check for a querier
3253  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
3254  *
3255  * Checks whether the given interface has a bridge on top and if so returns
3256  * true if a valid querier exists anywhere on the bridged link layer.
3257  * Otherwise returns false.
3258  */
3259 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
3260 {
3261 	struct net_bridge *br;
3262 	struct net_bridge_port *port;
3263 	struct ethhdr eth;
3264 	bool ret = false;
3265 
3266 	rcu_read_lock();
3267 	if (!netif_is_bridge_port(dev))
3268 		goto unlock;
3269 
3270 	port = br_port_get_rcu(dev);
3271 	if (!port || !port->br)
3272 		goto unlock;
3273 
3274 	br = port->br;
3275 
3276 	memset(&eth, 0, sizeof(eth));
3277 	eth.h_proto = htons(proto);
3278 
3279 	ret = br_multicast_querier_exists(br, &eth);
3280 
3281 unlock:
3282 	rcu_read_unlock();
3283 	return ret;
3284 }
3285 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
3286 
3287 /**
3288  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
3289  * @dev: The bridge port adjacent to which to check for a querier
3290  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
3291  *
3292  * Checks whether the given interface has a bridge on top and if so returns
3293  * true if a selected querier is behind one of the other ports of this
3294  * bridge. Otherwise returns false.
3295  */
3296 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
3297 {
3298 	struct net_bridge *br;
3299 	struct net_bridge_port *port;
3300 	bool ret = false;
3301 
3302 	rcu_read_lock();
3303 	if (!netif_is_bridge_port(dev))
3304 		goto unlock;
3305 
3306 	port = br_port_get_rcu(dev);
3307 	if (!port || !port->br)
3308 		goto unlock;
3309 
3310 	br = port->br;
3311 
3312 	switch (proto) {
3313 	case ETH_P_IP:
3314 		if (!timer_pending(&br->ip4_other_query.timer) ||
3315 		    rcu_dereference(br->ip4_querier.port) == port)
3316 			goto unlock;
3317 		break;
3318 #if IS_ENABLED(CONFIG_IPV6)
3319 	case ETH_P_IPV6:
3320 		if (!timer_pending(&br->ip6_other_query.timer) ||
3321 		    rcu_dereference(br->ip6_querier.port) == port)
3322 			goto unlock;
3323 		break;
3324 #endif
3325 	default:
3326 		goto unlock;
3327 	}
3328 
3329 	ret = true;
3330 unlock:
3331 	rcu_read_unlock();
3332 	return ret;
3333 }
3334 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
3335 
/* Bump the per-cpu IGMP/MLD statistics counter matching the packet's
 * protocol family (skb->protocol), message @type and direction @dir.
 * Counter updates are bracketed by the u64_stats seqcount so readers
 * can obtain consistent snapshots.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = IP total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* All IGMP query versions share this type value:
			 * anything larger than the basic igmphdr is counted
			 * as v3; v1 and v2 are told apart by the (max resp)
			 * code field, which is zero for v1.
			 */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MLD message length: IPv6 payload minus any extension
		 * headers between the fixed header and the transport header.
		 */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* queries larger than a plain mld_msg are MLDv2 */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
3406 
3407 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
3408 			const struct sk_buff *skb, u8 type, u8 dir)
3409 {
3410 	struct bridge_mcast_stats __percpu *stats;
3411 
3412 	/* if multicast_disabled is true then igmp type can't be set */
3413 	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3414 		return;
3415 
3416 	if (p)
3417 		stats = p->mcast_stats;
3418 	else
3419 		stats = br->mcast_stats;
3420 	if (WARN_ON(!stats))
3421 		return;
3422 
3423 	br_mcast_stats_add(stats, skb, type, dir);
3424 }
3425 
/* Allocate the bridge-global per-cpu multicast statistics.
 * Returns 0 on success or -ENOMEM if the per-cpu allocation fails.
 */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}
3434 
/* Release the bridge-global per-cpu multicast statistics. */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
3439 
3440 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
3441 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
3442 {
3443 	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
3444 	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
3445 }
3446 
/* Aggregate the per-cpu multicast statistics of port @p (or of bridge
 * @br when @p is NULL) into @dest.  Each cpu's counters are snapshotted
 * under the u64_stats seqcount so torn reads are retried.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	/* accumulate into a local copy first, then publish in one go */
	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry until the snapshot was not concurrently updated */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
3492 
/* Initialize the bridge's MDB rhashtable (keyed by struct br_ip, see
 * br_mdb_rht_params).  Returns 0 on success or a negative errno.
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
}
3497 
/* Tear down the bridge's MDB rhashtable. */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->mdb_hash_tbl);
}
3502