// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

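/* Report whether @pmctx is currently on the IPv4 multicast router port
 * list and, via @timer, how much time its router timer has left.  The
 * IPv6 variant compiles to a stub when CONFIG_IPV6 is disabled.
 */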
static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

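/* Worst-case netlink attribute size of a single MDBA_ROUTER_PORT entry;
 * br_rports_size() budgets one of these per port on either router list.
 */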
static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32));  /* MDBA_ROUTER_PATTR_VID */
}

size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}

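/* Fill an MDBA_ROUTER nest with one MDBA_ROUTER_PORT entry for each
 * bridge port acting as an IPv4 and/or IPv6 multicast router port in
 * @brmctx.  For vlan contexts only the vlan's per-port contexts are
 * examined and MDBA_ROUTER_PATTR_VID is included.
 */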
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

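/* Translate internal MDB_PG_FLAGS_* port group flags into the
 * MDB_FLAGS_* values exposed to user space in br_mdb_entry::flags.
 */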
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

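/* Convert a user space br_mdb_entry (plus optional MDBE_ATTR_SOURCE
 * attribute) into the bridge's internal br_ip key.  An unknown protocol
 * denotes an L2 entry keyed on the destination MAC address.
 */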
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

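/* Dump the source list of port group @p as an MDBA_MDB_EATTR_SRC_LIST
 * nest.  May run under RCU or br->multicast_lock (note the
 * lockdep_is_held() annotation); entries with an unrecognized protocol
 * are skipped rather than failing the dump.
 */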
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

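/* Emit one MDBA_MDB_ENTRY_INFO nest for @mp.  A non-NULL @p describes
 * that port group, otherwise the host-joined entry on the bridge device
 * itself.  The source list and filter mode are only dumped when
 * IGMPv3/MLDv2 is in effect.
 */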
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	else
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

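/* Dump the MDB of a single bridge device into an MDBA_MDB nest.
 * cb->args[1] and cb->args[2] hold the entry and port group indices to
 * resume from on the next invocation.
 */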
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

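/* Strict checking for RTM_GETMDB dump requests: the header must be
 * exactly a struct br_port_msg, with no ifindex filter and no trailing
 * attributes.
 */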
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

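/* RTM_GETMDB dump handler: walk all bridge devices in the namespace
 * under RCU and dump their MDB entries and router port lists.  This is
 * what e.g. "bridge mdb show" (iproute2) ends up calling.
 */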
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct net_bridge *br = netdev_priv(dev);
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, &br->multicast_ctx) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

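/* Build a complete RTM_NEWMDB/RTM_DELMDB notification for a single
 * (entry, port group) pair on behalf of br_mdb_notify().
 */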
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

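/* Estimate the notification size for one MDB entry, accounting for the
 * optional source attribute, group mode and per-source list when the
 * configured querier version (IGMPv3/MLDv2) makes them relevant.
 */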
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

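/* Completion context for a deferred switchdev MDB add: once the driver
 * reports success, br_mdb_complete() marks the matching port group with
 * MDB_PG_FLAGS_OFFLOAD under br->multicast_lock.
 */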
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

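/* Map a bridge MDB entry to the MAC + vid form used by
 * switchdev_obj_port_mdb, converting IP groups with the standard
 * multicast MAC mappings.
 */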
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

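/* Synchronously replay one MDB object to @nb, emulating a switchdev
 * port object notification and converting the notifier result into an
 * errno.
 */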
static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			     const struct switchdev_obj_port_mdb *mdb,
			     unsigned long action, const void *ctx,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

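/* Allocate one switchdev MDB object for @mp and queue it on @mdb_list;
 * called from atomic (RCU) context, hence GFP_ATOMIC.
 */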
static int br_mdb_queue_one(struct list_head *mdb_list,
			    enum switchdev_obj_id id,
			    const struct net_bridge_mdb_entry *mp,
			    struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}

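/* Replay the bridge's current MDB state towards a driver's notifier
 * block, presumably when the driver starts offloading @dev.  Entries
 * are collected under RCU and notified afterwards from blocking
 * context; see the comment in the body for the locking rationale.
 */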
int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
		  const void *ctx, bool adding, struct notifier_block *nb,
		  struct netlink_ext_ack *extack)
{
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_HOST_MDB,
					       mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_PORT_MDB,
					       mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
					action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	return err;
}

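/* Propagate a host-joined MDB entry to all lower devices of the bridge
 * as deferred SWITCHDEV_OBJ_ID_HOST_MDB objects.
 */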
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}

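/* Notify user space (RTNLGRP_MDB) and switchdev of an MDB entry change.
 * A NULL @pg means the change concerns the host-joined entry on the
 * bridge device itself.
 */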
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		br_switchdev_mdb_populate(&mdb, mp);

		mdb.obj.orig_dev = pg->key.port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->key.port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

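/* Build an RTM_NEWMDB/RTM_DELMDB router port notification carrying the
 * port ifindex and, for vlan contexts, the vlan id.
 */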
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}

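/* Notify user space that a port was added to or removed from the
 * multicast router port list.  With a NULL @pmctx, ifindex 0 is
 * reported.
 */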
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

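/* Validate a user supplied br_mdb_entry: the group must be a usable
 * multicast address (not IPv4 link-local multicast nor the IPv6
 * all-nodes address; a multicast MAC for L2 entries), the state either
 * permanent or temporary, and the vlan id in range.
 */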
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
			return false;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}

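/* Validate an MDBE_ATTR_SOURCE attribute: its length must match the
 * entry's protocol and the source must not be a multicast address.
 */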
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

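/* Common RTM_NEWMDB/RTM_DELMDB request parsing: resolve the bridge
 * device, validate the MDBA_SET_ENTRY payload and parse the optional
 * MDBA_SET_ENTRY_ATTRS nest into @mdb_attrs.
 */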
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}

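/* Pick the multicast context an entry should be installed under: the
 * global bridge context, or the vlan's own context when per-vlan
 * multicast snooping is enabled.  Returns NULL (with an extack message
 * set) on error.
 */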
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

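/* Add a group entry for @port, or a host-joined entry when @port is
 * NULL, with br->multicast_lock held.  Newly added (*,G) EXCLUDE and
 * (S,G) INCLUDE groups are cross-linked with their counterparts so
 * traffic is replicated correctly; see the comment before the final
 * switch statement.
 */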
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mcast *brmctx;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	unsigned char flags = 0;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		return -EINVAL;
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
	 * added to all S,G entries for proper replication; if we are adding
	 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
	 * added to it for proper replication
	 */
	if (br_multicast_should_handle_mode(brmctx, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}

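/* Take br->multicast_lock around br_mdb_add_group(). */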
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct net_bridge_port *p,
			struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
	spin_unlock_bh(&br->multicast_lock);

	return ret;
}

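/* RTM_NEWMDB handler, called with RTNL held.  If vlan filtering is
 * enabled and no vid was given, the entry is installed on every vlan
 * configured on the port.  From iproute2 this corresponds to e.g.
 * "bridge mdb add dev br0 port swp1 grp 239.1.1.1 permanent".
 */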
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified,
	 * install the mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}

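/* Remove a single entry (host leave or port group deletion) with
 * br->multicast_lock held.  Returns -EINVAL if no matching entry is
 * found or the port is disabled.
 */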
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

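/* RTM_DELMDB handler, called with RTNL held.  Mirrors br_mdb_add():
 * without a vid the deletion is attempted on every configured vlan, and
 * only the last per-vlan result is returned.
 */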
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified,
	 * delete the mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}

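/* Register/unregister the PF_BRIDGE RTM_GETMDB/RTM_NEWMDB/RTM_DELMDB
 * rtnetlink handlers implemented above.
 */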
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}