xref: /openbmc/linux/net/bridge/br_mdb.c (revision 8f8cb77e0b22d9044d8d57ab3bb18ea8d0474752)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/err.h>
3 #include <linux/igmp.h>
4 #include <linux/kernel.h>
5 #include <linux/netdevice.h>
6 #include <linux/rculist.h>
7 #include <linux/skbuff.h>
8 #include <linux/if_ether.h>
9 #include <net/ip.h>
10 #include <net/netlink.h>
11 #include <net/switchdev.h>
12 #if IS_ENABLED(CONFIG_IPV6)
13 #include <net/ipv6.h>
14 #include <net/addrconf.h>
15 #endif
16 
17 #include "br_private.h"
18 
19 static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
20 			       struct net_device *dev)
21 {
22 	struct net_bridge *br = netdev_priv(dev);
23 	struct net_bridge_port *p;
24 	struct nlattr *nest, *port_nest;
25 
26 	if (!br->multicast_router || hlist_empty(&br->router_list))
27 		return 0;
28 
29 	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
30 	if (nest == NULL)
31 		return -EMSGSIZE;
32 
33 	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
34 		if (!p)
35 			continue;
36 		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
37 		if (!port_nest)
38 			goto fail;
39 		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
40 		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
41 				br_timer_value(&p->multicast_router_timer)) ||
42 		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
43 			       p->multicast_router)) {
44 			nla_nest_cancel(skb, port_nest);
45 			goto fail;
46 		}
47 		nla_nest_end(skb, port_nest);
48 	}
49 
50 	nla_nest_end(skb, nest);
51 	return 0;
52 fail:
53 	nla_nest_cancel(skb, nest);
54 	return -EMSGSIZE;
55 }
56 
57 static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
58 {
59 	e->state = flags & MDB_PG_FLAGS_PERMANENT;
60 	e->flags = 0;
61 	if (flags & MDB_PG_FLAGS_OFFLOAD)
62 		e->flags |= MDB_FLAGS_OFFLOAD;
63 	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
64 		e->flags |= MDB_FLAGS_FAST_LEAVE;
65 }
66 
67 static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
68 				 struct nlattr **mdb_attrs)
69 {
70 	memset(ip, 0, sizeof(struct br_ip));
71 	ip->vid = entry->vid;
72 	ip->proto = entry->addr.proto;
73 	switch (ip->proto) {
74 	case htons(ETH_P_IP):
75 		ip->dst.ip4 = entry->addr.u.ip4;
76 		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
77 			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
78 		break;
79 #if IS_ENABLED(CONFIG_IPV6)
80 	case htons(ETH_P_IPV6):
81 		ip->dst.ip6 = entry->addr.u.ip6;
82 		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
83 			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
84 		break;
85 #endif
86 	}
87 
88 }
89 
/* Emit a port group's source list as an MDBA_MDB_EATTR_SRC_LIST nest:
 * one MDBA_MDB_SRCLIST_ENTRY per source carrying its address and timer.
 * Called either under rcu_read_lock() (dump path) or with the bridge
 * multicast lock held (notify path) — hence the lockdep expression on
 * the RCU iterator.  Returns 0 on success or -EMSGSIZE, cancelling the
 * whole source-list nest on failure.
 */
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	/* Don't emit an empty nest when the group has no sources. */
	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			/* Unknown protocol: skip this entry but keep going. */
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
145 
/* Fill one MDBA_MDB_ENTRY_INFO nest for the mdb entry @mp.  When @p is
 * non-NULL the nest describes that port group (its timer, flags, rtprot,
 * filter mode and, for IGMPv3/MLDv2, its source list); when @p is NULL
 * it describes the host-joined instance of the group (bridge device's
 * ifindex, group timer, no flags).
 * Returns 0 on success or -EMSGSIZE, cancelling the nest on failure.
 */
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		/* port group entry: report the member port and its timer */
		ifindex = p->port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		/* host-joined entry: report the bridge device itself */
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	/* legacy unstructured entry followed by the timer attribute */
	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	/* Source lists/filter mode only exist with IGMPv3 / MLDv2; for
	 * (S, G) entries also report the source address itself.
	 */
	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}
227 
/* Dump all mdb entries of one bridge into an MDBA_MDB nest.  Supports
 * resumption across multiple dump calls: cb->args[1] is the index of the
 * next mdb entry to dump and cb->args[2] the index of the next port group
 * within that entry.  Runs under rcu_read_lock() from br_mdb_dump().
 * Returns 0 on completion or -EMSGSIZE when the skb filled up (state for
 * resuming is saved in cb->args before returning).
 */
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		/* skip entries already dumped in a previous call */
		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		/* the host-joined instance is dumped first, but only when
		 * we are not resuming mid-way through this entry's ports
		 */
		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				/* keep the port groups filled so far */
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		/* entry fully dumped: reset per-entry resume state */
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}
292 
293 static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
294 				 struct netlink_ext_ack *extack)
295 {
296 	struct br_port_msg *bpm;
297 
298 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
299 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
300 		return -EINVAL;
301 	}
302 
303 	bpm = nlmsg_data(nlh);
304 	if (bpm->ifindex) {
305 		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
306 		return -EINVAL;
307 	}
308 	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
309 		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
310 		return -EINVAL;
311 	}
312 
313 	return 0;
314 }
315 
/* RTM_GETMDB dump handler: walks all bridge devices in the namespace and
 * emits one RTM_GETMDB message per bridge containing its mdb entries and
 * router port list.  cb->args[0] tracks the next device index to resume
 * from; cb->args[1]/[2] track per-bridge progress (see br_mdb_fill_info()).
 */
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	/* used by the netlink core to detect device list changes mid-dump */
	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			/* bridge fully dumped: reset its resume index */
			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	/* close the (possibly partial) message so what was filled is sent */
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
371 
/* Build a complete RTM_NEWMDB/RTM_DELMDB notification message for one
 * mdb entry (@pg == NULL means the host-joined instance) into @skb.
 * Returns 0 on success or -EMSGSIZE, cancelling the whole message on
 * failure via the goto ladder below.
 */
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	/* nlmsg_cancel() below discards everything after nlh anyway */
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
411 
/* Compute an upper bound on the notification message size for one mdb
 * entry so nlmsg_new() can allocate a big-enough skb.  Must stay in sync
 * with what __mdb_fill_info()/__mdb_fill_srcs() actually emit.
 * Called with the bridge multicast lock held (src_list is stable).
 */
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	/* br_port_msg + MDBA_MDB_ENTRY_INFO + MDBA_MDB_EATTR_TIMER */
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	/* host-joined notifications carry no per-port attributes */
	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		/* IGMPv2 never dumps sources/filter mode */
		if (pg->port->br->multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		/* MLDv1 never dumps sources/filter mode */
		if (pg->port->br->multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}
465 
/* Context passed to the deferred switchdev completion (br_mdb_complete())
 * identifying which port/group pair the hardware offload applied to.
 * Allocated in br_mdb_notify() and freed by br_mdb_complete().
 */
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};
470 
/* Deferred switchdev completion callback: mark the matching port group(s)
 * as hardware-offloaded once the driver confirms the add succeeded.
 * Always frees @priv (the br_mdb_complete_info allocated by the caller).
 */
static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	/* driver reported failure: nothing to mark, just free the context */
	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
498 
/* Propagate a host-joined mdb entry to one device stacked below the
 * bridge via a deferred SWITCHDEV_OBJ_ID_HOST_MDB add/del.  The group
 * address is mapped to its multicast MAC form for the hardware object.
 */
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};

	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}
529 
530 static void br_mdb_switchdev_host(struct net_device *dev,
531 				  struct net_bridge_mdb_entry *mp, int type)
532 {
533 	struct net_device *lower_dev;
534 	struct list_head *iter;
535 
536 	netdev_for_each_lower_dev(dev, lower_dev, iter)
537 		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
538 }
539 
/* Notify user space (RTNLGRP_MDB) and the hardware (switchdev) about an
 * mdb entry add/delete.  @pg == NULL means the event concerns the
 * host-joined instance, which is propagated to lower devices instead of
 * a specific port.  Called with the bridge multicast lock held.
 */
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		if (mp->addr.proto == htons(ETH_P_IP))
			ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
		else
			ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
		mdb.obj.orig_dev = pg->port->dev;
		switch (type) {
		case RTM_NEWMDB:
			/* complete_info ownership passes to the deferred
			 * br_mdb_complete() on successful obj_add; on
			 * failure (or allocation failure) we free it here
			 * and skip the offload marking.
			 */
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
600 
/* Build a router-port notification message: a br_port_msg header plus an
 * MDBA_ROUTER nest containing the router port's ifindex.
 * Returns 0 on success or -EMSGSIZE, cancelling the message on failure.
 */
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
635 
636 static inline size_t rtnl_rtr_nlmsg_size(void)
637 {
638 	return NLMSG_ALIGN(sizeof(struct br_port_msg))
639 		+ nla_total_size(sizeof(__u32));
640 }
641 
/* Notify RTNLGRP_MDB listeners that @port was added to / removed from the
 * bridge's multicast router port list (@type is RTM_NEWMDB/RTM_DELMDB).
 * A NULL @port is reported with ifindex 0.
 */
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
667 
/* Validate a user-supplied br_mdb_entry: non-zero port ifindex, a
 * routable multicast group address (IPv4 multicast but not link-local;
 * for IPv6 only the link-local all-nodes group is rejected), a known
 * state and a valid VLAN id.  Sets an extack message and returns false
 * on the first failed check.
 */
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		/* NOTE(review): unlike IPv4, there is no check here that the
		 * IPv6 address is actually multicast — only ff02::1 is
		 * rejected; verify whether that is intentional.
		 */
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}
708 
/* Validate an MDBE_ATTR_SOURCE attribute against the entry's protocol:
 * the attribute length must match the address family and the source
 * address must be unicast.  Returns true if valid, false (with an extack
 * message set) otherwise.
 */
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}
746 
/* Policy for the MDBA_SET_ENTRY_ATTRS nest: the source address is a
 * binary blob sized between an IPv4 and an IPv6 address; the exact
 * family-specific length is checked later in is_valid_mdb_source().
 */
static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};
752 
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request.  On success fills:
 * @pdev      - the target bridge device (looked up by bpm->ifindex),
 * @pentry    - the validated br_mdb_entry from MDBA_SET_ENTRY,
 * @mdb_attrs - parsed MDBA_SET_ENTRY_ATTRS nest (zeroed when absent).
 * Returns 0 or a negative errno with an extack message set.
 */
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		/* no nest supplied: make sure callers see no attributes */
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}
819 
/* Add a static mdb group membership for @port (or a host join when @port
 * is NULL) and send the RTM_NEWMDB notification.  Must be called with the
 * bridge multicast lock held (see __br_mdb_add()).
 * Returns 0, -EINVAL on invalid host-join requests, -EEXIST when already
 * joined, or an allocation error.
 */
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	struct br_ip group;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	/* walk the (pointer-ordered) port group list to find the insert
	 * position, rejecting a duplicate join by the same port
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	/* (*, G) defaults to EXCLUDE, (S, G) to INCLUDE filter mode */
	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	p = br_multicast_new_port_group(port, &group, *pp, entry->state, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	/* temporary entries age out like dynamically learned ones */
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

	return 0;
}
896 
897 static int __br_mdb_add(struct net *net, struct net_bridge *br,
898 			struct net_bridge_port *p,
899 			struct br_mdb_entry *entry,
900 			struct nlattr **mdb_attrs,
901 			struct netlink_ext_ack *extack)
902 {
903 	int ret;
904 
905 	spin_lock_bh(&br->multicast_lock);
906 	ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
907 	spin_unlock_bh(&br->multicast_lock);
908 
909 	return ret;
910 }
911 
/* RTM_NEWMDB handler: parse the request, resolve bridge and (optionally)
 * bridge port, then add the entry — once per configured VLAN when VLAN
 * filtering is on and no VLAN was specified, otherwise once.
 * Runs under rtnl_lock via rtnetlink.
 */
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	/* entry->ifindex equal to the bridge itself means a host join (p
	 * stays NULL); otherwise it must name a port of this bridge
	 */
	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}
983 
/* Delete one mdb membership under the bridge multicast lock: either the
 * host-joined instance (entry ifindex == bridge ifindex) or the port
 * group matching the entry's ifindex.  Sends the RTM_DELMDB notification
 * via br_mdb_notify()/br_multicast_del_pg().
 * Returns 0 on success or -EINVAL when nothing matched (or the port is
 * disabled / multicast is off).
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		/* no members left: let the group timer reap the entry */
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
1031 
/* RTM_DELMDB handler: parse the request, resolve bridge and (optionally)
 * bridge port, then delete the entry — for every configured VLAN when
 * VLAN filtering is on and no VLAN was specified (only the last VLAN's
 * result is returned), otherwise once.  Runs under rtnl_lock.
 */
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* bridge ifindex means host leave; otherwise validate the port */
	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}
1078 
/* Register the PF_BRIDGE mdb rtnetlink handlers (dump + add/del). */
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}
1085 
/* Unregister the PF_BRIDGE mdb rtnetlink handlers set up in br_mdb_init(). */
void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
1092