xref: /openbmc/linux/net/bridge/br_if.c (revision 965f22bc)
/*
 *	Userspace interface
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <net/net_namespace.h>

#include "br_private.h"

/*
 * Determine the initial path cost based on speed,
 * using the recommendations from the 802.1D standard.
 *
 * Since the driver might sleep, we must not be holding any locks.
 */
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_1000:
			return 4;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		}
	}

	/* Old silly heuristics based on name */
	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;	/* assume old 10Mbps */
}


/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	*notified = false;
	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED) {
			br_stp_enable_port(p);
			*notified = true;
		}
	} else {
		if (p->state != BR_STATE_DISABLED) {
			br_stp_disable_port(p);
			*notified = true;
		}
	}
	spin_unlock_bh(&br->lock);
}

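/* Put the port into promiscuous mode and unsync the static fdb entries
 * that were programmed into its address list, as they are no longer needed.
 */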
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}

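/* Take the port out of promiscuous mode once the static fdb entries
 * have been programmed into its address list.
 */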
static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Check if the port is already non-promisc or if it doesn't
	 * support UNICAST filtering.  Without unicast filtering support
	 * we'll end up re-enabling promisc mode anyway, so just check for
	 * it here.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Since we'll be clearing the promisc mode, program the port
	 * first so that there is no interruption in traffic.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed, or when certain port flags
 * change, this function is called to automatically manage the
 * promiscuity setting of all the bridge ports.  We are always called
 * under RTNL, so we can skip using RCU primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or the bridge interface is placed
	 * into promiscuous mode, place all ports in promiscuous mode.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* If the number of auto-ports is <= 1, then all other
			 * ports will have their output configuration
			 * statically specified through fdbs.  Since ingress
			 * on the auto-port becomes forwarding/egress to other
			 * ports and egress configuration is statically known,
			 * we can say that the ingress configuration of the
			 * auto-port is also statically known.
			 * This lets us disable promiscuous mode and write
			 * this config to hw.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}

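/* Set, replace or clear (backup_dev == NULL) the backup port of @p.
 * The backup device must be a port on the same bridge.  Called with RTNL held.
 */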
int nbp_backup_change(struct net_bridge_port *p,
		      struct net_device *backup_dev)
{
	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
	struct net_bridge_port *backup_p = NULL;

	ASSERT_RTNL();

	if (backup_dev) {
		if (!br_port_exists(backup_dev))
			return -ENOENT;

		backup_p = br_port_get_rtnl(backup_dev);
		if (backup_p->br != p->br)
			return -EINVAL;
	}

	if (p == backup_p)
		return -EINVAL;

	if (old_backup == backup_p)
		return 0;

	/* if the backup link is already set, clear it */
	if (old_backup)
		old_backup->backup_redirected_cnt--;

	if (backup_p)
		backup_p->backup_redirected_cnt++;
	rcu_assign_pointer(p->backup_port, backup_p);

	return 0;
}

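/* Clear @p's own backup port and remove @p as the backup port of any
 * other port on the bridge.
 */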
static void nbp_backup_clear(struct net_bridge_port *p)
{
	nbp_backup_change(p, NULL);
	if (p->backup_redirected_cnt) {
		struct net_bridge_port *cur_p;

		list_for_each_entry(cur_p, &p->br->port_list, list) {
			struct net_bridge_port *backup_p;

			backup_p = rtnl_dereference(cur_p->backup_port);
			if (backup_p == p)
				nbp_backup_change(cur_p, NULL);
		}
	}

	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}

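/* Recount the automatic (learning/flooding) ports and re-evaluate
 * port promiscuity if the count changed.
 */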
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
	/* If the port is currently promiscuous, unset promiscuity.
	 * Otherwise, it is a static port so remove all addresses
	 * from it.
	 */
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}

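/* kobject release callback: frees the port once its last reference is dropped. */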
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct net_bridge_port *p = kobj_to_brport(kobj);

	net_ns_get_ownership(dev_net(p->dev), uid, gid);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
	.get_ownership = brport_get_ownership,
};

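/* Final per-port teardown: drop the reference on the underlying device
 * and put the port's kobject.
 */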
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	dev_put(dev);

	kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}

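/* Return the largest forwarding headroom required by any port device. */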
static unsigned get_max_headroom(struct net_bridge *br)
{
	unsigned max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}

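/* Propagate a new RX headroom to every port and record it as the bridge
 * device's needed headroom.
 */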
static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from the bridge is done in two steps
 * via RCU.  The first step marks the device as down; that deletes
 * all the timers and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from concurrent admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();
	nbp_backup_clear(p);

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_recalculate_neigh_suppress_enabled(br);

	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	set_bit(0, inuse);	/* zero is reserved */
	list_for_each_entry(p, &br->port_list, list) {
		set_bit(p->port_no, inuse);
	}
	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	kfree(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	dev_hold(dev);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		dev_put(dev);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}

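/* Create and register a new bridge device with the given name in @net. */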
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);

	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);
	return res;
}

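/* Delete the named bridge device; it must exist, be a bridge and be down. */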
int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!(dev->priv_flags & IFF_EBRIDGE)) {
		/* Attempt to delete non bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shutdown yet. */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	rtnl_unlock();
	return ret;
}

/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
static int br_mtu_min(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int ret_mtu = 0;

	list_for_each_entry(p, &br->port_list, list)
		if (!ret_mtu || ret_mtu > p->dev->mtu)
			ret_mtu = p->dev->mtu;

	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}

void br_mtu_auto_adjust(struct net_bridge *br)
{
	ASSERT_RTNL();

	/* if the bridge MTU was manually configured don't mess with it */
	if (br->mtu_set_by_user)
		return;

	/* change to the minimum MTU and clear the flag which was set by
	 * the bridge ndo_change_mtu callback
	 */
	dev_set_mtu(br->dev, br_mtu_min(br));
	br->mtu_set_by_user = false;
}

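/* Cap the bridge's GSO size/segment limits to the smallest limits among its ports. */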
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
	const struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		gso_max_size = min(gso_max_size, p->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
	}
	br->dev->gso_max_size = gso_max_size;
	br->dev->gso_max_segs = gso_max_segs;
}

/*
 * Recompute the bridge's features from its slave devices' features.
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
	netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr;

	/* Don't allow bridging non-ethernet-like devices, or DSA-enabled
	 * master network devices, since the bridge layer rx_handler prevents
	 * the DSA fake ethertype handler from being invoked: we would not
	 * strip off the DSA switch tag protocol header and the bridge layer
	 * would just return RX_HANDLER_CONSUMED, stopping RX processing for
	 * these frames.
	 */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr) ||
	    netdev_uses_dsa(dev))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* Device has master upper dev */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err)
		goto put_back;

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err1;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_handle_frame, p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	err = nbp_switchdev_mark_set(p);
	if (err)
		goto err6;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_insert(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed insert local address bridge forwarding table\n");

	err = nbp_vlan_init(p);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err7;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err7:
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
err6:
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	kobject_put(&p->kobj);
	p = NULL; /* kobject_put frees */
err1:
	dev_set_allmulti(dev, -1);
put_back:
	dev_put(dev);
	kfree(p);
	return err;
}

/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there may still be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}

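/* Handle a change of the port flags in @mask: update the auto-port count
 * (and thus promiscuity) and the bridge's neighbor suppression state as needed.
 */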
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);

	if (mask & BR_NEIGH_SUPPRESS)
		br_recalculate_neigh_suppress_enabled(br);
}