// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

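/* Dump the bridge's multicast router ports as an MDBA_ROUTER nested
 * attribute; each router port carries its ifindex plus timer and type
 * attributes. Returns -EMSGSIZE when the dump skb runs out of room.
 */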
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

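/* Translate the internal MDB_PG_FLAGS_* port group flags into the
 * MDB_FLAGS_* values exposed to userspace in struct br_mdb_entry.
 */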
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
}

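/* Convert a userspace struct br_mdb_entry into the bridge's internal
 * struct br_ip group address representation.
 */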
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}

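/* Dump a port group's source list as an MDBA_MDB_EATTR_SRC_LIST nested
 * attribute; each MDBA_MDB_SRCLIST_ENTRY carries the source address and
 * its timer. Runs under RCU or with the bridge's multicast_lock held.
 */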
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.u.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.u.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

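/* Fill one MDBA_MDB_ENTRY_INFO attribute for a group: either the
 * host-joined entry (p == NULL) or one port group, with its timer and,
 * when the group can be source-specific (IGMPv3/MLDv2), its filter mode
 * and source list.
 */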
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.u.ip6;
#endif
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer))) {
		nla_nest_cancel(skb, nest_ent);
		return -EMSGSIZE;
	}
	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(p && mp->br->multicast_igmp_version == 3);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(p && mp->br->multicast_mld_version == 2);
		break;
#endif
	}
	if (dump_srcs_mode &&
	    (__mdb_fill_srcs(skb, p) ||
	     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE, p->filter_mode))) {
		nla_nest_cancel(skb, nest_ent);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest_ent);

	return 0;
}

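/* Walk the bridge's MDB and emit MDBA_MDB/MDBA_MDB_ENTRY attributes,
 * resuming from cb->args[1] (entry index) and cb->args[2] (port group
 * index) so a partial dump can continue in the next netlink callback.
 */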
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_cancel(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

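/* Strict validation of an RTM_GETMDB dump request: a bare struct
 * br_port_msg header, no ifindex filter and no trailing attributes.
 */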
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

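/* RTM_GETMDB dump handler: under RCU, emit one NLM_F_MULTI message per
 * bridge device, containing its MDB entries and multicast router ports.
 */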
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

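/* Build a single RTM_NEWMDB/RTM_DELMDB notification message for one
 * (entry, port group) pair.
 */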
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

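/* Size the notification skb for one entry, accounting for the filter
 * mode and per-source attributes when the group can be source-specific.
 */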
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	switch (pg->addr.proto) {
	case htons(ETH_P_IP):
		if (pg->port->br->multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (pg->port->br->multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

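/* Deferred switchdev completion callback for RTM_NEWMDB: if the driver
 * accepted the entry, mark the matching port group as offloaded.
 */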
static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

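/* Program a host-joined group into or out of hardware via the
 * SWITCHDEV_OBJ_ID_HOST_MDB object on each of the bridge's lower devices.
 */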
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};

	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&mp->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}

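/* Propagate an MDB change: program the port (or host) entry through
 * switchdev and send an RTNLGRP_MDB netlink notification.
 */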
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		if (mp->addr.proto == htons(ETH_P_IP))
			ip_eth_mc_map(mp->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
		else
			ipv6_eth_mc_map(&mp->addr.u.ip6, mdb.addr);
#endif
		mdb.obj.orig_dev = pg->port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

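/* Build a single MDBA_ROUTER notification message carrying one router
 * port ifindex.
 */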
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

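/* Notify userspace via RTNLGRP_MDB that a port was added to or removed
 * from the multicast router port list.
 */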
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

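/* Sanity-check a userspace MDB entry: it must target a real port, carry
 * a routable multicast group address, a valid state and a valid VLAN id.
 */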
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

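/* Common request parsing for RTM_NEWMDB/RTM_DELMDB: resolve the bridge
 * device from the header's ifindex and extract the validated
 * MDBA_SET_ENTRY payload.
 */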
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

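/* Add a host-joined entry (port == NULL) or a new port group for @group,
 * scanning the existing port list for duplicates and for the insertion
 * point. Temporary entries get their membership timer armed. Must be
 * called with the bridge's multicast_lock held.
 */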
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (!mp) {
		mp = br_multicast_new_group(br, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state)
			return -EINVAL;
		if (mp->host_joined)
			return -EEXIST;

		br_multicast_host_join(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, entry->state, NULL,
					MCAST_EXCLUDE);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

	return 0;
}

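/* Resolve and validate the target bridge port, then add the group with
 * the bridge's multicast_lock held.
 */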
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p = NULL;
	int ret;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	if (entry->ifindex != br->dev->ifindex) {
		dev = __dev_get_by_index(net, entry->ifindex);
		if (!dev)
			return -ENODEV;

		p = br_port_get_rtnl(dev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
	}

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

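/* RTM_NEWMDB handler. When VLAN filtering is enabled and no VLAN id is
 * given, the entry is installed on every VLAN configured on the port.
 */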
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, entry);
	}

	return err;
}

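/* Remove a host-joined entry or one port's group membership, taking the
 * bridge's multicast_lock around the MDB lookup and removal.
 */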
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

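/* RTM_DELMDB handler, the deletion counterpart of br_mdb_add(): with
 * VLAN filtering enabled and no VLAN id given, the entry is removed
 * from every VLAN configured on the port.
 */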
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
		}
	} else {
		err = __br_mdb_del(br, entry);
	}

	return err;
}

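/* Register/unregister the MDB rtnetlink handlers: dump for RTM_GETMDB,
 * doit for RTM_NEWMDB and RTM_DELMDB.
 */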
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}