// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

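/* Fetch the remaining time on a port's per-family multicast router timer and
 * report whether the port is currently on the corresponding router list.
 * The IPv6 variant degrades to a stub when CONFIG_IPV6 is disabled.
 */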
static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

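/* Netlink size of a single MDBA_ROUTER_PORT entry and of the complete
 * MDBA_ROUTER attribute for all router ports in a multicast context. The
 * lists are walked under RCU, so the result is a best-effort estimate.
 */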
static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32));  /* MDBA_ROUTER_PATTR_VID */
}

size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}

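/* Dump the multicast router ports of a multicast context into an MDBA_ROUTER
 * nested attribute. For a per-vlan context, only ports that have the vlan
 * configured contribute an entry.
 */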
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

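/* Translate the internal MDB_PG_FLAGS_* port group flags into the MDB_FLAGS_*
 * values exposed to userspace in struct br_mdb_entry.
 */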
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

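/* Convert a userspace br_mdb_entry (plus an optional MDBE_ATTR_SOURCE
 * attribute) into the bridge's internal br_ip group key. Protocols other
 * than IPv4/IPv6 are treated as L2 (MAC address) groups.
 */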
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

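/* Emit the MDBA_MDB_EATTR_SRC_LIST nested attribute carrying each source of
 * a port group together with its remaining timer. Callers hold either RCU or
 * the bridge multicast lock, hence the lockdep expression on the list walk.
 */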
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

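/* Fill one MDBA_MDB_ENTRY_INFO attribute, either for a port group or, when
 * @p is NULL, for the host-joined entry of the bridge itself. The source
 * list and filter mode are dumped only for IGMPv3/MLDv2 contexts.
 */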
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP)) {
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	} else {
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
		e.state = MDB_PERMANENT;
	}
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4 &&
		    nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
				    mp->addr.src.ip4))
			goto nest_err;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6) &&
		    nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
				     &mp->addr.src.ip6))
			goto nest_err;
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

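/* Walk the bridge MDB under RCU and dump its entries. cb->args[1] and
 * cb->args[2] record the entry and port group positions so that an
 * interrupted dump resumes where it left off.
 */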
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

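/* RTM_GETMDB dump handler for a single bridge: emits the MDB entries
 * followed by the router ports of the default (non-vlan) multicast context.
 */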
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
		struct netlink_callback *cb)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->ifindex = dev->ifindex;

	rcu_read_lock();

	err = br_mdb_fill_info(skb, cb, dev);
	if (err)
		goto out;
	err = br_rports_fill_info(skb, &br->multicast_ctx);
	if (err)
		goto out;

out:
	rcu_read_unlock();
	nlmsg_end(skb, nlh);
	return err;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

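/* Estimate the notification size for one MDB entry, accounting for the
 * optional source address, group mode and per-source nested attributes when
 * the context runs IGMPv3/MLDv2.
 */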
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

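/* Notify switchdev drivers and RTNLGRP_MDB listeners about an added or
 * deleted MDB entry. May run in atomic context, hence GFP_ATOMIC.
 */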
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	br_switchdev_mdb_notify(dev, mp, pg, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}

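/* Notify userspace that a port joined or left the multicast router list.
 * @pmctx may be NULL, in which case an ifindex of 0 is reported.
 */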
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
	[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
						  sizeof(struct in_addr),
						  sizeof(struct in6_addr)),
};

static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
						  MCAST_INCLUDE),
	[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};

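/* Validate a user-supplied source address: the length must match the entry's
 * protocol and the address must not itself be multicast.
 */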
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

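/* Pick the multicast context an MDB entry operates on: the global bridge
 * context, or the matching per-vlan context when vlan snooping is enabled.
 * Returns NULL and sets an extack message on error.
 */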
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags)
{
	unsigned long now = jiffies;

	pg->flags = flags;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	return 0;
}

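/* Add, or update with NLM_F_REPLACE, a port group on an (S, G) entry. The
 * port list is kept ordered by port pointer value, so insertion stops at the
 * first smaller port. Temporary entries that were not created from a source
 * list are armed with the membership interval timer.
 */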
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_mcast *brmctx,
			       unsigned char flags,
			       struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
						       flags);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					MCAST_INCLUDE, cfg->rt_protocol, extack);
	if (unlikely(!p))
		return -ENOMEM;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
	 * proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
		struct net_bridge_mdb_entry *star_mp;
		struct br_ip star_group;

		star_group = p->key.addr;
		memset(&star_group.src, 0, sizeof(star_group.src));
		star_mp = br_mdb_ip_get(cfg->br, &star_group);
		if (star_mp)
			br_multicast_sg_add_exclude_ports(star_mp, p);
	}

	return 0;
}

static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
				    struct br_ip *src_ip,
				    struct net_bridge_mcast *brmctx,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *sgmp;
	struct br_mdb_config sg_cfg;
	struct br_ip sg_ip;
	u8 flags = 0;

	sg_ip = cfg->group;
	sg_ip.src = src_ip->src;
	sgmp = br_multicast_new_group(cfg->br, &sg_ip);
	if (IS_ERR(sgmp)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
		return PTR_ERR(sgmp);
	}

	if (cfg->entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;
	if (cfg->filter_mode == MCAST_EXCLUDE)
		flags |= MDB_PG_FLAGS_BLOCKED;

	memset(&sg_cfg, 0, sizeof(sg_cfg));
	sg_cfg.br = cfg->br;
	sg_cfg.p = cfg->p;
	sg_cfg.entry = cfg->entry;
	sg_cfg.group = sg_ip;
	sg_cfg.src_entry = true;
	sg_cfg.filter_mode = MCAST_INCLUDE;
	sg_cfg.rt_protocol = cfg->rt_protocol;
	sg_cfg.nlflags = cfg->nlflags;
	return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}

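/* Add one source to a (*, G) port group and install the corresponding
 * (S, G) forwarding entry. Only temporary INCLUDE-mode sources age out; all
 * others have their timer stopped.
 */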
static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
				struct net_bridge_port_group *pg,
				struct net_bridge_mcast *brmctx,
				struct br_mdb_src_entry *src,
				struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	int err;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (!ent) {
		ent = br_multicast_new_group_src(pg, &src->addr);
		if (!ent) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
			return -ENOSPC;
		}
	} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
		return -EEXIST;
	}

	if (cfg->filter_mode == MCAST_INCLUDE &&
	    cfg->entry->state == MDB_TEMPORARY)
		mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
	else
		del_timer(&ent->timer);

	/* Install a (S, G) forwarding entry for the source. */
	err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
	if (err)
		goto err_del_sg;

	ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;

	return 0;

err_del_sg:
	__br_multicast_del_group_src(ent);
	return err;
}

static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
				 struct br_mdb_src_entry *src)
{
	struct net_bridge_group_src *ent;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (WARN_ON_ONCE(!ent))
		return;
	br_multicast_del_group_src(ent, false);
}

static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
				 struct net_bridge_port_group *pg,
				 struct net_bridge_mcast *brmctx,
				 struct netlink_ext_ack *extack)
{
	int i, err;

	for (i = 0; i < cfg->num_src_entries; i++) {
		err = br_mdb_add_group_src(cfg, pg, brmctx,
					   &cfg->src_entries[i], extack);
		if (err)
			goto err_del_group_srcs;
	}

	return 0;

err_del_group_srcs:
	for (i--; i >= 0; i--)
		br_mdb_del_group_src(pg, &cfg->src_entries[i]);
	return err;
}

static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
				     struct net_bridge_port_group *pg,
				     struct net_bridge_mcast *brmctx,
				     struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int err;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		goto err_clear_delete;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_DELETE)
			br_multicast_del_group_src(ent, false);
	}

	return 0;

err_clear_delete:
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_DELETE;
	return err;
}

static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
				       struct net_bridge_mdb_entry *mp,
				       struct net_bridge_port_group *pg,
				       struct net_bridge_mcast *brmctx,
				       unsigned char flags,
				       struct netlink_ext_ack *extack)
{
	unsigned long now = jiffies;
	int err;

	err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		return err;

	pg->flags = flags;
	pg->filter_mode = cfg->filter_mode;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
		br_multicast_star_g_handle_mode(pg, cfg->filter_mode);

	return 0;
}

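/* Add, or replace, a (*, G) port group together with its user-supplied
 * source list. A new EXCLUDE group is also propagated to the matching
 * (S, G) entries so traffic is replicated correctly.
 */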
static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags,
				   struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;
	int err;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
							   flags, extack);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					cfg->filter_mode, cfg->rt_protocol,
					extack);
	if (unlikely(!p))
		return -ENOMEM;

	err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
	if (err)
		goto err_del_port_group;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE port group (*, G), it needs to be
	 * also added to all (S, G) entries for proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);

	return 0;

err_del_port_group:
	br_multicast_del_port_group(p);
	return err;
}

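/* Add an MDB entry with the multicast lock held: either a host-joined group
 * on the bridge device itself, or a (*, G) / (S, G) port group.
 */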
static int br_mdb_add_group(const struct br_mdb_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge_port *port = cfg->p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mcast *brmctx;
	struct br_ip group = cfg->group;
	unsigned char flags = 0;

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	mp = br_multicast_new_group(br, &group);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	if (br_multicast_is_star_g(&group))
		return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
	else
		return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}

static int __br_mdb_add(const struct br_mdb_config *cfg,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&cfg->br->multicast_lock);
	ret = br_mdb_add_group(cfg, extack);
	spin_unlock_bh(&cfg->br->multicast_lock);

	return ret;
}

static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
					struct br_mdb_src_entry *src,
					__be16 proto,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
			       br_mdbe_src_list_entry_pol, extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
		return -EINVAL;

	if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
		return -EINVAL;

	src->addr.proto = proto;
	nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
		   nla_len(tb[MDBE_SRCATTR_ADDRESS]));

	return 0;
}

static int br_mdb_config_src_list_init(struct nlattr *src_list,
				       struct br_mdb_config *cfg,
				       struct netlink_ext_ack *extack)
{
	struct nlattr *src_entry;
	int rem, err;
	int i = 0;

	nla_for_each_nested(src_entry, src_list, rem)
		cfg->num_src_entries++;

	if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
				       PG_SRC_ENT_LIMIT - 1);
		return -EINVAL;
	}

	cfg->src_entries = kcalloc(cfg->num_src_entries,
				   sizeof(struct br_mdb_src_entry), GFP_KERNEL);
	if (!cfg->src_entries)
		return -ENOMEM;

	nla_for_each_nested(src_entry, src_list, rem) {
		err = br_mdb_config_src_entry_init(src_entry,
						   &cfg->src_entries[i],
						   cfg->entry->addr.proto,
						   extack);
		if (err)
			goto err_src_entry_init;
		i++;
	}

	return 0;

err_src_entry_init:
	kfree(cfg->src_entries);
	return err;
}

static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
	kfree(cfg->src_entries);
}

static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
				    struct br_mdb_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
			       br_mdbe_attrs_pol, extack);
	if (err)
		return err;

	if (mdb_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
				 cfg->entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);

	if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
			return -EINVAL;
		}
		cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
	} else {
		cfg->filter_mode = MCAST_EXCLUDE;
	}

	if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
			return -EINVAL;
		}
		if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
			return -EINVAL;
		}
		err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
						  cfg, extack);
		if (err)
			return err;
	}

	if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
		return -EINVAL;
	}

	if (mdb_attrs[MDBE_ATTR_RTPROT]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
			return -EINVAL;
		}
		cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
	}

	return 0;
}

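/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request into a br_mdb_config:
 * resolve the bridge and optional port, validate the group address and fill
 * in the defaults (EXCLUDE filter mode, RTPROT_STATIC).
 */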
static int br_mdb_config_init(struct br_mdb_config *cfg, struct net_device *dev,
			      struct nlattr *tb[], u16 nlmsg_flags,
			      struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);

	memset(cfg, 0, sizeof(*cfg));
	cfg->filter_mode = MCAST_EXCLUDE;
	cfg->rt_protocol = RTPROT_STATIC;
	cfg->nlflags = nlmsg_flags;

	cfg->br = netdev_priv(dev);

	if (!netif_running(cfg->br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);

	if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
		struct net_device *pdev;

		pdev = __dev_get_by_index(net, cfg->entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		cfg->p = br_port_get_rtnl(pdev);
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (cfg->p->br != cfg->br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	if (cfg->entry->addr.proto == htons(ETH_P_IP) &&
	    ipv4_is_zeronet(cfg->entry->addr.u.ip4)) {
		NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address 0.0.0.0 is not allowed");
		return -EINVAL;
	}

	if (tb[MDBA_SET_ENTRY_ATTRS])
		return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
						extack);
	else
		__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);

	return 0;
}

static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
	br_mdb_config_src_list_fini(cfg);
}

int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
	if (err)
		return err;

	err = -EINVAL;
	/* host join errors which can happen before creating the group */
	if (!cfg.p && !br_group_is_l2(&cfg.group)) {
		/* don't allow any flags for host-joined IP groups */
		if (cfg.entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			goto out;
		}
		if (!br_multicast_is_star_g(&cfg.group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			goto out;
		}
	}

	if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		goto out;
	}

	if (cfg.p) {
		if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			goto out;
		}
		vg = nbp_vlan_group(cfg.p);
	} else {
		vg = br_vlan_group(cfg.br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_add(&cfg, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(&cfg, extack);
	}

out:
	br_mdb_config_fini(&cfg);
	return err;
}

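/* Delete a single MDB entry under the multicast lock: the host-joined entry
 * when the request targets the bridge device itself, otherwise the matching
 * port group.
 */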
static int __br_mdb_del(const struct br_mdb_config *cfg)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip = cfg->group;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, 0, extack);
	if (err)
		return err;

	if (cfg.p)
		vg = nbp_vlan_group(cfg.p);
	else
		vg = br_vlan_group(cfg.br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_del(&cfg);
		}
	} else {
		err = __br_mdb_del(&cfg);
	}

	br_mdb_config_fini(&cfg);
	return err;
}