// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Userspace interface
 *      Linux ethernet bridge
 *
 *      Authors:
 *      Lennert Buytenhek               <buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <net/net_namespace.h>

#include "br_private.h"

/*
 * Determine the initial path cost based on speed,
 * using the recommendations from the 802.1D standard.
 *
 * Since the driver might sleep, we must not be holding any locks.
 */
static int port_cost(struct net_device *dev)
{
        struct ethtool_link_ksettings ecmd;

        if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
                switch (ecmd.base.speed) {
                case SPEED_10000:
                        return 2;
                case SPEED_1000:
                        return 4;
                case SPEED_100:
                        return 19;
                case SPEED_10:
                        return 100;
                }
        }

        /* Old silly heuristics based on name */
        if (!strncmp(dev->name, "lec", 3))
                return 7;

        if (!strncmp(dev->name, "plip", 4))
                return 2500;

        return 100;     /* assume old 10Mbps */
}
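/* Note: link speeds not listed in the switch above (e.g. 2.5G or 25G)
 * fall through to the legacy name-based heuristics and ultimately to the
 * 10 Mb/s default of 100, unless the path cost has been configured
 * explicitly by the administrator (BR_ADMIN_COST).
 */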
/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
        struct net_device *dev = p->dev;
        struct net_bridge *br = p->br;

        if (!(p->flags & BR_ADMIN_COST) &&
            netif_running(dev) && netif_oper_up(dev))
                p->path_cost = port_cost(dev);

        *notified = false;
        if (!netif_running(br->dev))
                return;

        spin_lock_bh(&br->lock);
        if (netif_running(dev) && netif_oper_up(dev)) {
                if (p->state == BR_STATE_DISABLED) {
                        br_stp_enable_port(p);
                        *notified = true;
                }
        } else {
                if (p->state != BR_STATE_DISABLED) {
                        br_stp_disable_port(p);
                        *notified = true;
                }
        }
        spin_unlock_bh(&br->lock);
}

static void br_port_set_promisc(struct net_bridge_port *p)
{
        int err = 0;

        if (br_promisc_port(p))
                return;

        err = dev_set_promiscuity(p->dev, 1);
        if (err)
                return;

        br_fdb_unsync_static(p->br, p);
        p->flags |= BR_PROMISC;
}

static void br_port_clear_promisc(struct net_bridge_port *p)
{
        int err;

        /* Check if the port is already non-promisc or if it doesn't
         * support UNICAST filtering.  Without unicast filtering support
         * we'll end up re-enabling promisc mode anyway, so just check for
         * it here.
         */
        if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
                return;

        /* Since we'll be clearing the promisc mode, program the port
         * first so that we don't have interruption in traffic.
         */
        err = br_fdb_sync_static(p->br, p);
        if (err)
                return;

        dev_set_promiscuity(p->dev, -1);
        p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage
 * promiscuity setting of all the bridge ports.  We are always called
 * under RTNL so can skip using rcu primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
        struct net_bridge_port *p;
        bool set_all = false;

        /* If vlan filtering is disabled or bridge interface is placed
         * into promiscuous mode, place all ports in promiscuous mode.
         */
        if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
                set_all = true;

        list_for_each_entry(p, &br->port_list, list) {
                if (set_all) {
                        br_port_set_promisc(p);
                } else {
                        /* If the number of auto-ports is <= 1, then all other
                         * ports will have their output configuration
                         * statically specified through fdbs.  Since ingress
                         * on the auto-port becomes forwarding/egress to other
                         * ports and egress configuration is statically known,
                         * we can say that ingress configuration of the
                         * auto-port is also statically known.
                         * This lets us disable promiscuous mode and write
                         * this config to hw.
                         */
                        if (br->auto_cnt == 0 ||
                            (br->auto_cnt == 1 && br_auto_port(p)))
                                br_port_clear_promisc(p);
                        else
                                br_port_set_promisc(p);
                }
        }
}
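/* Example: with a single auto port (learning/flooding on) and the rest
 * static, the auto port only has to accept the addresses held in the
 * static fdb entries of the other ports, so its promiscuity can be
 * dropped (given IFF_UNICAST_FLT); the static ports must still see
 * traffic towards addresses learned behind the auto port, so they stay
 * promiscuous.  With no auto ports at all, every port can drop it.
 */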
int nbp_backup_change(struct net_bridge_port *p,
                      struct net_device *backup_dev)
{
        struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
        struct net_bridge_port *backup_p = NULL;

        ASSERT_RTNL();

        if (backup_dev) {
                if (!netif_is_bridge_port(backup_dev))
                        return -ENOENT;

                backup_p = br_port_get_rtnl(backup_dev);
                if (backup_p->br != p->br)
                        return -EINVAL;
        }

        if (p == backup_p)
                return -EINVAL;

        if (old_backup == backup_p)
                return 0;

        /* if the backup link is already set, clear it */
        if (old_backup)
                old_backup->backup_redirected_cnt--;

        if (backup_p)
                backup_p->backup_redirected_cnt++;
        rcu_assign_pointer(p->backup_port, backup_p);

        return 0;
}
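/* nbp_backup_change() is driven from the netlink port configuration path
 * (the IFLA_BRPORT_BACKUP_PORT attribute); the forwarding path redirects
 * traffic to the backup port when the primary port's carrier is down.
 */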
static void nbp_backup_clear(struct net_bridge_port *p)
{
        nbp_backup_change(p, NULL);
        if (p->backup_redirected_cnt) {
                struct net_bridge_port *cur_p;

                list_for_each_entry(cur_p, &p->br->port_list, list) {
                        struct net_bridge_port *backup_p;

                        backup_p = rtnl_dereference(cur_p->backup_port);
                        if (backup_p == p)
                                nbp_backup_change(cur_p, NULL);
                }
        }

        WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}

static void nbp_update_port_count(struct net_bridge *br)
{
        struct net_bridge_port *p;
        u32 cnt = 0;

        list_for_each_entry(p, &br->port_list, list) {
                if (br_auto_port(p))
                        cnt++;
        }
        if (br->auto_cnt != cnt) {
                br->auto_cnt = cnt;
                br_manage_promisc(br);
        }
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
        /* If port is currently promiscuous, unset promiscuity.
         * Otherwise, it is a static port so remove all addresses
         * from it.
         */
        dev_set_allmulti(p->dev, -1);
        if (br_promisc_port(p))
                dev_set_promiscuity(p->dev, -1);
        else
                br_fdb_unsync_static(p->br, p);
}

static void release_nbp(struct kobject *kobj)
{
        struct net_bridge_port *p
                = container_of(kobj, struct net_bridge_port, kobj);
        kfree(p);
}

static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
        struct net_bridge_port *p = kobj_to_brport(kobj);

        net_ns_get_ownership(dev_net(p->dev), uid, gid);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
        .sysfs_ops = &brport_sysfs_ops,
#endif
        .release = release_nbp,
        .get_ownership = brport_get_ownership,
};

static void destroy_nbp(struct net_bridge_port *p)
{
        struct net_device *dev = p->dev;

        p->br = NULL;
        p->dev = NULL;
        dev_put_track(dev, &p->dev_tracker);

        kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
        struct net_bridge_port *p =
                        container_of(head, struct net_bridge_port, rcu);
        destroy_nbp(p);
}

static unsigned get_max_headroom(struct net_bridge *br)
{
        unsigned max_headroom = 0;
        struct net_bridge_port *p;

        list_for_each_entry(p, &br->port_list, list) {
                unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

                if (dev_headroom > max_headroom)
                        max_headroom = dev_headroom;
        }

        return max_headroom;
}

static void update_headroom(struct net_bridge *br, int new_hr)
{
        struct net_bridge_port *p;

        list_for_each_entry(p, &br->port_list, list)
                netdev_set_rx_headroom(p->dev, new_hr);

        br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from a bridge is done in two steps via RCU.
 * The first step marks the device as down; that deletes all the timers
 * and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from multiple admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
        struct net_bridge *br = p->br;
        struct net_device *dev = p->dev;

        sysfs_remove_link(br->ifobj, p->dev->name);

        nbp_delete_promisc(p);

        spin_lock_bh(&br->lock);
        br_stp_disable_port(p);
        spin_unlock_bh(&br->lock);

        br_mrp_port_del(br, p);
        br_cfm_port_del(br, p);

        br_ifinfo_notify(RTM_DELLINK, NULL, p);

        list_del_rcu(&p->list);
        if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
                update_headroom(br, get_max_headroom(br));
        netdev_reset_rx_headroom(dev);

        nbp_vlan_flush(p);
        br_fdb_delete_by_port(br, p, 0, 1);
        switchdev_deferred_process();
        nbp_backup_clear(p);

        nbp_update_port_count(br);

        netdev_upper_dev_unlink(dev, br->dev);

        dev->priv_flags &= ~IFF_BRIDGE_PORT;

        netdev_rx_handler_unregister(dev);

        br_multicast_del_port(p);

        kobject_uevent(&p->kobj, KOBJ_REMOVE);
        kobject_del(&p->kobj);

        br_netpoll_disable(p);

        call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p, *n;

        list_for_each_entry_safe(p, n, &br->port_list, list) {
                del_nbp(p);
        }

        br_recalculate_neigh_suppress_enabled(br);

        br_fdb_delete_by_port(br, NULL, 0, 1);

        cancel_delayed_work_sync(&br->gc_work);

        br_sysfs_delbr(br->dev);
        unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
        int index;
        struct net_bridge_port *p;
        unsigned long *inuse;

        inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        __set_bit(0, inuse);    /* zero is reserved */
        list_for_each_entry(p, &br->port_list, list)
                __set_bit(p->port_no, inuse);

        index = find_first_zero_bit(inuse, BR_MAX_PORTS);
        bitmap_free(inuse);

        return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
                                       struct net_device *dev)
{
        struct net_bridge_port *p;
        int index, err;

        index = find_portno(br);
        if (index < 0)
                return ERR_PTR(index);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL)
                return ERR_PTR(-ENOMEM);

        p->br = br;
        dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
        p->dev = dev;
        p->path_cost = port_cost(dev);
        p->priority = 0x8000 >> BR_PORT_BITS;
        p->port_no = index;
        p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
        br_init_port(p);
        br_set_state(p, BR_STATE_DISABLED);
        br_stp_port_timer_init(p);
        err = br_multicast_add_port(p);
        if (err) {
                dev_put_track(dev, &p->dev_tracker);
                kfree(p);
                p = ERR_PTR(err);
        }

        return p;
}

int br_add_bridge(struct net *net, const char *name)
{
        struct net_device *dev;
        int res;

        dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
                           br_dev_setup);

        if (!dev)
                return -ENOMEM;

        dev_net_set(dev, net);
        dev->rtnl_link_ops = &br_link_ops;

        res = register_netdevice(dev);
        if (res)
                free_netdev(dev);
        return res;
}

int br_del_bridge(struct net *net, const char *name)
{
        struct net_device *dev;
        int ret = 0;

        dev = __dev_get_by_name(net, name);
        if (dev == NULL)
                ret = -ENXIO;   /* Could not find device */

        else if (!netif_is_bridge_master(dev)) {
                /* Attempt to delete non bridge device! */
                ret = -EPERM;
        }

        else if (dev->flags & IFF_UP) {
                /* Not shutdown yet. */
                ret = -EBUSY;
        }

        else
                br_dev_delete(dev, NULL);

        return ret;
}
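/* br_add_bridge()/br_del_bridge() back the legacy SIOCBRADDBR/SIOCBRDELBR
 * ioctls (e.g. "brctl addbr br0"); bridges created over netlink
 * ("ip link add br0 type bridge") go through br_link_ops instead.
 */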
/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
static int br_mtu_min(const struct net_bridge *br)
{
        const struct net_bridge_port *p;
        int ret_mtu = 0;

        list_for_each_entry(p, &br->port_list, list)
                if (!ret_mtu || ret_mtu > p->dev->mtu)
                        ret_mtu = p->dev->mtu;

        return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}

void br_mtu_auto_adjust(struct net_bridge *br)
{
        ASSERT_RTNL();

        /* if the bridge MTU was manually configured don't mess with it */
        if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
                return;

        /* change to the minimum MTU and clear the flag which was set by
         * the bridge ndo_change_mtu callback
         */
        dev_set_mtu(br->dev, br_mtu_min(br));
        br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
}
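/* For instance, a bridge whose ports have MTUs of 9000 and 1500 is
 * auto-adjusted to 1500 whenever ports are added or removed, unless the
 * administrator has set the bridge MTU explicitly (BROPT_MTU_SET_BY_USER).
 */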
static void br_set_gso_limits(struct net_bridge *br)
{
        unsigned int tso_max_size = TSO_MAX_SIZE;
        const struct net_bridge_port *p;
        u16 tso_max_segs = TSO_MAX_SEGS;

        list_for_each_entry(p, &br->port_list, list) {
                tso_max_size = min(tso_max_size, p->dev->tso_max_size);
                tso_max_segs = min(tso_max_segs, p->dev->tso_max_segs);
        }
        netif_set_tso_max_size(br->dev, tso_max_size);
        netif_set_tso_max_segs(br->dev, tso_max_segs);
}

/*
 * Recompute features using the slaves' features
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
                                        netdev_features_t features)
{
        struct net_bridge_port *p;
        netdev_features_t mask;

        if (list_empty(&br->port_list))
                return features;

        mask = features;
        features &= ~NETIF_F_ONE_FOR_ALL;

        list_for_each_entry(p, &br->port_list, list) {
                features = netdev_increment_features(features,
                                                     p->dev->features, mask);
        }
        features = netdev_add_tso_features(features, mask);

        return features;
}

/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
              struct netlink_ext_ack *extack)
{
        struct net_bridge_port *p;
        int err = 0;
        unsigned br_hr, dev_hr;
        bool changed_addr, fdb_synced = false;

        /* Don't allow bridging non-ethernet like devices. */
        if ((dev->flags & IFF_LOOPBACK) ||
            dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
            !is_valid_ether_addr(dev->dev_addr))
                return -EINVAL;

        /* Also don't allow bridging of net devices that are DSA masters,
         * since the bridge layer rx_handler prevents the DSA fake ethertype
         * handler from being invoked, so we don't get the chance to strip
         * off and parse the DSA switch tag protocol header (the bridge layer
         * just returns RX_HANDLER_CONSUMED, stopping RX processing for these
         * frames).
         * The only case where that would not be an issue is when bridging
         * can already be offloaded, such as when the DSA master is itself a
         * DSA or plain switchdev port, and is bridged only with other ports
         * from the same hardware device.
         */
        if (netdev_uses_dsa(dev)) {
                list_for_each_entry(p, &br->port_list, list) {
                        if (!netdev_port_same_parent_id(dev, p->dev)) {
                                NL_SET_ERR_MSG(extack,
                                               "Cannot do software bridging with a DSA master");
                                return -EINVAL;
                        }
                }
        }

        /* No bridging of bridges */
        if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
                NL_SET_ERR_MSG(extack,
                               "Can not enslave a bridge to a bridge");
                return -ELOOP;
        }

        /* Device has master upper dev */
        if (netdev_master_upper_dev_get(dev))
                return -EBUSY;

        /* No bridging devices that dislike that (e.g. wireless) */
        if (dev->priv_flags & IFF_DONT_BRIDGE) {
                NL_SET_ERR_MSG(extack,
                               "Device does not allow enslaving to a bridge");
                return -EOPNOTSUPP;
        }

        p = new_nbp(br, dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        call_netdevice_notifiers(NETDEV_JOIN, dev);

        err = dev_set_allmulti(dev, 1);
        if (err) {
                br_multicast_del_port(p);
                dev_put_track(dev, &p->dev_tracker);
                kfree(p);       /* kobject not yet init'd, manually free */
                goto err1;
        }

        err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
                                   SYSFS_BRIDGE_PORT_ATTR);
        if (err)
                goto err2;

        err = br_sysfs_addif(p);
        if (err)
                goto err2;

        err = br_netpoll_enable(p);
        if (err)
                goto err3;

        err = netdev_rx_handler_register(dev, br_get_rx_handler(dev), p);
        if (err)
                goto err4;

        dev->priv_flags |= IFF_BRIDGE_PORT;

        err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
        if (err)
                goto err5;

        dev_disable_lro(dev);

        list_add_rcu(&p->list, &br->port_list);

        nbp_update_port_count(br);
        if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
                /* When updating the port count we also update all ports'
                 * promiscuous mode.
                 * A port leaving promiscuous mode normally gets the bridge's
                 * fdb synced to the unicast filter (if supported), however,
                 * `br_port_clear_promisc` does not distinguish between
                 * non-promiscuous ports and *new* ports, so we need to
                 * sync explicitly here.
                 */
                fdb_synced = br_fdb_sync_static(br, p) == 0;
                if (!fdb_synced)
                        netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
        }

        netdev_update_features(br->dev);

        br_hr = br->dev->needed_headroom;
        dev_hr = netdev_get_fwd_headroom(dev);
        if (br_hr < dev_hr)
                update_headroom(br, dev_hr);
        else
                netdev_set_rx_headroom(dev, br_hr);

        if (br_fdb_add_local(br, p, dev->dev_addr, 0))
                netdev_err(dev, "failed insert local address bridge forwarding table\n");

        if (br->dev->addr_assign_type != NET_ADDR_SET) {
                /* Ask for permission to use this MAC address now, even if we
                 * don't end up choosing it below.
                 */
                err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
                if (err)
                        goto err6;
        }

        err = nbp_vlan_init(p, extack);
        if (err) {
                netdev_err(dev, "failed to initialize vlan filtering on this port\n");
                goto err6;
        }

        spin_lock_bh(&br->lock);
        changed_addr = br_stp_recalculate_bridge_id(br);

        if (netif_running(dev) && netif_oper_up(dev) &&
            (br->dev->flags & IFF_UP))
                br_stp_enable_port(p);
        spin_unlock_bh(&br->lock);

        br_ifinfo_notify(RTM_NEWLINK, NULL, p);

        if (changed_addr)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

        br_mtu_auto_adjust(br);
        br_set_gso_limits(br);

        kobject_uevent(&p->kobj, KOBJ_ADD);

        return 0;

err6:
        if (fdb_synced)
                br_fdb_unsync_static(br, p);
        list_del_rcu(&p->list);
        br_fdb_delete_by_port(br, p, 0, 1);
        nbp_update_port_count(br);
        netdev_upper_dev_unlink(dev, br->dev);
err5:
        dev->priv_flags &= ~IFF_BRIDGE_PORT;
        netdev_rx_handler_unregister(dev);
err4:
        br_netpoll_disable(p);
err3:
        sysfs_remove_link(br->ifobj, p->dev->name);
err2:
        br_multicast_del_port(p);
        dev_put_track(dev, &p->dev_tracker);
        kobject_put(&p->kobj);
        dev_set_allmulti(dev, -1);
err1:
        return err;
}
/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
        struct net_bridge_port *p;
        bool changed_addr;

        p = br_port_get_rtnl(dev);
        if (!p || p->br != br)
                return -EINVAL;

        /* Since more than one interface can be attached to a bridge,
         * there still may be an alternate path for netconsole to use;
         * therefore there is no reason for a NETDEV_RELEASE event.
         */
        del_nbp(p);

        br_mtu_auto_adjust(br);
        br_set_gso_limits(br);

        spin_lock_bh(&br->lock);
        changed_addr = br_stp_recalculate_bridge_id(br);
        spin_unlock_bh(&br->lock);

        if (changed_addr)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

        netdev_update_features(br->dev);

        return 0;
}
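/* br_add_if()/br_del_if() are reached from the bridge's
 * ndo_add_slave/ndo_del_slave handlers ("ip link set eth0 master br0")
 * as well as from the legacy SIOCBRADDIF/SIOCBRDELIF ioctls.
 */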
680 */ 681 err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack); 682 if (err) 683 goto err6; 684 } 685 686 err = nbp_vlan_init(p, extack); 687 if (err) { 688 netdev_err(dev, "failed to initialize vlan filtering on this port\n"); 689 goto err6; 690 } 691 692 spin_lock_bh(&br->lock); 693 changed_addr = br_stp_recalculate_bridge_id(br); 694 695 if (netif_running(dev) && netif_oper_up(dev) && 696 (br->dev->flags & IFF_UP)) 697 br_stp_enable_port(p); 698 spin_unlock_bh(&br->lock); 699 700 br_ifinfo_notify(RTM_NEWLINK, NULL, p); 701 702 if (changed_addr) 703 call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); 704 705 br_mtu_auto_adjust(br); 706 br_set_gso_limits(br); 707 708 kobject_uevent(&p->kobj, KOBJ_ADD); 709 710 return 0; 711 712 err6: 713 if (fdb_synced) 714 br_fdb_unsync_static(br, p); 715 list_del_rcu(&p->list); 716 br_fdb_delete_by_port(br, p, 0, 1); 717 nbp_update_port_count(br); 718 netdev_upper_dev_unlink(dev, br->dev); 719 err5: 720 dev->priv_flags &= ~IFF_BRIDGE_PORT; 721 netdev_rx_handler_unregister(dev); 722 err4: 723 br_netpoll_disable(p); 724 err3: 725 sysfs_remove_link(br->ifobj, p->dev->name); 726 err2: 727 br_multicast_del_port(p); 728 dev_put_track(dev, &p->dev_tracker); 729 kobject_put(&p->kobj); 730 dev_set_allmulti(dev, -1); 731 err1: 732 return err; 733 } 734 735 /* called with RTNL */ 736 int br_del_if(struct net_bridge *br, struct net_device *dev) 737 { 738 struct net_bridge_port *p; 739 bool changed_addr; 740 741 p = br_port_get_rtnl(dev); 742 if (!p || p->br != br) 743 return -EINVAL; 744 745 /* Since more than one interface can be attached to a bridge, 746 * there still maybe an alternate path for netconsole to use; 747 * therefore there is no reason for a NETDEV_RELEASE event. 748 */ 749 del_nbp(p); 750 751 br_mtu_auto_adjust(br); 752 br_set_gso_limits(br); 753 754 spin_lock_bh(&br->lock); 755 changed_addr = br_stp_recalculate_bridge_id(br); 756 spin_unlock_bh(&br->lock); 757 758 if (changed_addr) 759 call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); 760 761 netdev_update_features(br->dev); 762 763 return 0; 764 } 765 766 void br_port_flags_change(struct net_bridge_port *p, unsigned long mask) 767 { 768 struct net_bridge *br = p->br; 769 770 if (mask & BR_AUTO_MASK) 771 nbp_update_port_count(br); 772 773 if (mask & BR_NEIGH_SUPPRESS) 774 br_recalculate_neigh_suppress_enabled(br); 775 } 776 777 bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag) 778 { 779 struct net_bridge_port *p; 780 781 p = br_port_get_rtnl_rcu(dev); 782 if (!p) 783 return false; 784 785 return p->flags & flag; 786 } 787 EXPORT_SYMBOL_GPL(br_port_flag_is_set); 788