xref: /openbmc/linux/net/bridge/br_if.c (revision d4295e12)
/*
 *	Userspace interface
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <net/net_namespace.h>

#include "br_private.h"

/*
 * Determine the initial path cost based on speed,
 * using the recommendations from the 802.1D standard.
 *
 * Since the driver might sleep, we must not be holding any locks.
 */
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_1000:
			return 4;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		}
	}

	/* Old silly heuristics based on name */
	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;	/* assume old 10Mbps */
}


/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	*notified = false;
	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED) {
			br_stp_enable_port(p);
			*notified = true;
		}
	} else {
		if (p->state != BR_STATE_DISABLED) {
			br_stp_disable_port(p);
			*notified = true;
		}
	}
	spin_unlock_bh(&br->lock);
}

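/* Put the port into promiscuous mode and unsync the static FDB entries that
 * were programmed into its unicast filter, since they are no longer needed.
 */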
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}

static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Check if the port is already non-promisc or if it doesn't
	 * support UNICAST filtering.  Without unicast filtering support
	 * we'll end up re-enabling promisc mode anyway, so just check for
	 * it here.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Since we'll be clearing the promisc mode, program the port
	 * first so that we don't have interruption in traffic.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage
 * promiscuity setting of all the bridge ports.  We are always called
 * under RTNL so can skip using rcu primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or bridge interface is placed
	 * into promiscuous mode, place all ports in promiscuous mode.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* If the number of auto-ports is <= 1, then all other
			 * ports will have their output configuration
			 * statically specified through fdbs.  Since ingress
			 * on the auto-port becomes forwarding/egress to other
			 * ports and egress configuration is statically known,
			 * we can say that ingress configuration of the
			 * auto-port is also statically known.
			 * This lets us disable promiscuous mode and write
			 * this config to hw.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}

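/* Set or clear the backup port of @p under RTNL.  The backup device must be
 * a port of the same bridge; a per-port counter tracks how many other ports
 * currently point at each port as their backup.
 */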
int nbp_backup_change(struct net_bridge_port *p,
		      struct net_device *backup_dev)
{
	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
	struct net_bridge_port *backup_p = NULL;

	ASSERT_RTNL();

	if (backup_dev) {
		if (!br_port_exists(backup_dev))
			return -ENOENT;

		backup_p = br_port_get_rtnl(backup_dev);
		if (backup_p->br != p->br)
			return -EINVAL;
	}

	if (p == backup_p)
		return -EINVAL;

	if (old_backup == backup_p)
		return 0;

	/* if the backup link is already set, clear it */
	if (old_backup)
		old_backup->backup_redirected_cnt--;

	if (backup_p)
		backup_p->backup_redirected_cnt++;
	rcu_assign_pointer(p->backup_port, backup_p);

	return 0;
}

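/* Clear @p's own backup port and walk the bridge's port list to drop any
 * backup assignments that still point at @p before it goes away.
 */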
static void nbp_backup_clear(struct net_bridge_port *p)
{
	nbp_backup_change(p, NULL);
	if (p->backup_redirected_cnt) {
		struct net_bridge_port *cur_p;

		list_for_each_entry(cur_p, &p->br->port_list, list) {
			struct net_bridge_port *backup_p;

			backup_p = rtnl_dereference(cur_p->backup_port);
			if (backup_p == p)
				nbp_backup_change(cur_p, NULL);
		}
	}

	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}

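/* Recount the automatic ports (br_auto_port()) and, if the count changed,
 * re-evaluate the promiscuity of every port via br_manage_promisc().
 */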
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
	/* If port is currently promiscuous, unset promiscuity.
	 * Otherwise, it is a static port so remove all addresses
	 * from it.
	 */
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}

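/* kobject release callback: the last reference to the port kobject is gone,
 * so the port structure itself can finally be freed.
 */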
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct net_bridge_port *p = kobj_to_brport(kobj);

	net_ns_get_ownership(dev_net(p->dev), uid, gid);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
	.get_ownership = brport_get_ownership,
};

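/* Final teardown of a port: drop the reference on the underlying device and
 * put the port kobject, which frees the port via release_nbp().
 */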
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	dev_put(dev);

	kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}

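/* The bridge device advertises as needed_headroom the largest forwarding
 * headroom required by any of its ports; the helpers below recompute that
 * maximum and push a new value down to every port.
 */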
static unsigned get_max_headroom(struct net_bridge *br)
{
	unsigned max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}

static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from the bridge is done in two steps via RCU.
 * The first step marks the device as down; that deletes all the timers and
 * stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from multiple admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();
	nbp_backup_clear(p);

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_recalculate_neigh_suppress_enabled(br);

	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	set_bit(0, inuse);	/* zero is reserved */
	list_for_each_entry(p, &br->port_list, list) {
		set_bit(p->port_no, inuse);
	}
	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	bitmap_free(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	dev_hold(dev);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		dev_put(dev);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}

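/* Allocate a new bridge net_device named @name, attach it to @net and
 * register it.  On registration failure the device is freed again.
 */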
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);

	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);
	return res;
}

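/* Look up the device @name in @net under RTNL and delete it.  The device
 * must be a bridge and must already be administratively down.
 */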
int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!(dev->priv_flags & IFF_EBRIDGE)) {
		/* Attempt to delete non-bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shut down yet. */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	rtnl_unlock();
	return ret;
}

/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum MTU of the ports */
static int br_mtu_min(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int ret_mtu = 0;

	list_for_each_entry(p, &br->port_list, list)
		if (!ret_mtu || ret_mtu > p->dev->mtu)
			ret_mtu = p->dev->mtu;

	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}

void br_mtu_auto_adjust(struct net_bridge *br)
{
	ASSERT_RTNL();

	/* if the bridge MTU was manually configured don't mess with it */
	if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
		return;

	/* change to the minimum MTU and clear the flag which was set by
	 * the bridge ndo_change_mtu callback
	 */
	dev_set_mtu(br->dev, br_mtu_min(br));
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
}

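/* Propagate GSO limits: lower the bridge device's gso_max_size/gso_max_segs
 * to the smallest values advertised by any of its ports.
 */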
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
	const struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		gso_max_size = min(gso_max_size, p->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
	}
	br->dev->gso_max_size = gso_max_size;
	br->dev->gso_max_segs = gso_max_segs;
}

/*
 * Recompute the bridge's features from the slaves' features
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
	netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr;

	/* Don't allow bridging non-ethernet like devices, or DSA-enabled
	 * master network devices, since the bridge layer rx_handler prevents
	 * the DSA fake ethertype handler from being invoked: we would not
	 * strip off the DSA switch tag protocol header, and the bridge layer
	 * would just return RX_HANDLER_CONSUMED, stopping RX processing for
	 * these frames.
	 */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr) ||
	    netdev_uses_dsa(dev))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* Device has master upper dev */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err)
		goto put_back;

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err1;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_handle_frame, p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	err = nbp_switchdev_mark_set(p);
	if (err)
		goto err6;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_insert(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed insert local address bridge forwarding table\n");

	err = nbp_vlan_init(p);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err7;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err7:
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
err6:
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	kobject_put(&p->kobj);
	p = NULL; /* kobject_put frees */
err1:
	dev_set_allmulti(dev, -1);
put_back:
	dev_put(dev);
	kfree(p);
	return err;
}

/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there may still be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}

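/* Called after a port's flags have changed: flags covered by BR_AUTO_MASK
 * affect the automatic port count (and thus promiscuity), and a change of
 * BR_NEIGH_SUPPRESS requires re-evaluating neighbor suppression on the bridge.
 */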
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);

	if (mask & BR_NEIGH_SUPPRESS)
		br_recalculate_neigh_suppress_enabled(br);
}