// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_mcast_port *pmctx_null = NULL;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	const struct nf_br_ops *nf_ops;
	u8 state = BR_STATE_FORWARDING;
	struct net_bridge_vlan *vlan;
	const unsigned char *dest;
	u16 vid = 0;

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));

	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid,
				&state, &vlan))
		goto out;

	if (IS_ENABLED(CONFIG_INET) &&
	    (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
	     eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
			goto out;
		}
		if (br_multicast_rcv(&brmctx, &pmctx_null, vlan, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(brmctx, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst))
			br_multicast_flood(mdst, skb, brmctx, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

static struct lock_class_key bridge_netdev_addr_lock_key;

static void br_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
}
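
/* ndo_init: allocate the per-CPU software stats and initialise the FDB and
 * MDB hash tables, VLAN state and multicast statistics, unwinding everything
 * already set up if a later step fails.
 */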
static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = br_fdb_hash_init(br);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = br_mdb_hash_init(br);
	if (err) {
		free_percpu(dev->tstats);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_vlan_init(br);
	if (err) {
		free_percpu(dev->tstats);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		free_percpu(dev->tstats);
		br_vlan_flush(br);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
	}

	br_set_lockdep_class(dev);
	return err;
}

static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
	free_percpu(dev->tstats);
}

static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	return 0;
}

static void br_dev_set_multicast_list(struct net_device *dev)
{
}

static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}

static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);

	netif_stop_queue(dev);

	return 0;
}

static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}

/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* dev_set_mac_addr() can be called by a master device on bridge's
	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
	 */
	if (dev->reg_state != NETREG_REGISTERED)
		return -EBUSY;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id(). */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}

static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bridge", sizeof(info->driver));
	strlcpy(info->version, BR_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
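
/* Report the highest speed among member ports that are running and
 * operationally up; duplex and port type are left unknown.
 */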
static int br_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;

	list_for_each_entry(p, &br->port_list, list) {
		struct ethtool_link_ksettings ecmd;
		struct net_device *pdev = p->dev;

		if (!netif_running(pdev) || !netif_oper_up(pdev))
			continue;

		if (__ethtool_get_link_ksettings(pdev, &ecmd))
			continue;

		if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
			continue;

		if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
		    cmd->base.speed < ecmd.base.speed)
			cmd->base.speed = ecmd.base.speed;
	}

	return 0;
}

static netdev_features_t br_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}

static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}

int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}

static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free(np);
}

#endif

static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev, extack);
}

static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}
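
/* Resolve one hop of a device forwarding path: look up the destination MAC in
 * the FDB, record the VLAN tag/untag action for this bridge and continue the
 * walk from the egress bridge port.
 */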
static int br_fill_forward_path(struct net_device_path_ctx *ctx,
				struct net_device_path *path)
{
	struct net_bridge_fdb_entry *f;
	struct net_bridge_port *dst;
	struct net_bridge *br;

	if (netif_is_bridge_port(ctx->dev))
		return -1;

	br = netdev_priv(ctx->dev);

	br_vlan_fill_forward_path_pvid(br, ctx, path);

	f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
	if (!f || !f->dst)
		return -1;

	dst = READ_ONCE(f->dst);
	if (!dst)
		return -1;

	if (br_vlan_fill_forward_path_mode(br, dst, path))
		return -1;

	path->type = DEV_PATH_BRIDGE;
	path->dev = dst->br->dev;
	ctx->dev = dst->dev;

	switch (path->bridge.vlan_mode) {
	case DEV_PATH_BR_VLAN_TAG:
		if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
			return -ENOSPC;
		ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id;
		ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
		ctx->num_vlans++;
		break;
	case DEV_PATH_BR_VLAN_UNTAG_HW:
	case DEV_PATH_BR_VLAN_UNTAG:
		ctx->num_vlans--;
		break;
	case DEV_PATH_BR_VLAN_KEEP:
		break;
	}

	return 0;
}

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo		 = br_getinfo,
	.get_link		 = ethtool_op_get_link,
	.get_link_ksettings	 = br_get_link_ksettings,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_uninit		 = br_dev_uninit,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = dev_get_tstats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_siocdevprivate	 = br_dev_siocdevprivate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features	 = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_del_bulk	 = br_fdb_delete_bulk,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_fdb_get		 = br_fdb_get,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
	.ndo_features_check	 = passthru_features_check,
	.ndo_fill_forward_path	 = br_fill_forward_path,
};

static struct device_type br_type = {
	.name	= "bridge",
};
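
/* Initialise the bridge net_device and struct net_bridge defaults: random MAC
 * address, feature flags, STP disabled and the classic 802.1D timer values.
 */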
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
	INIT_HLIST_HEAD(&br->frame_type_list);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
	INIT_HLIST_HEAD(&br->mrp_list);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_CFM)
	INIT_HLIST_HEAD(&br->mep_list);
#endif
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}