// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
	const struct nf_br_ops *nf_ops;
	const unsigned char *dest;
	struct ethhdr *eth;
	u16 vid = 0;

	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);
	skb_pull(skb, ETH_HLEN);

	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
		goto out;

	if (IS_ENABLED(CONFIG_INET) &&
	    (eth->h_proto == htons(ETH_P_ARP) ||
	     eth->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
			goto out;
		}
		if (br_multicast_rcv(br, NULL, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb)))
			br_multicast_flood(mdst, skb, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!br->stats)
		return -ENOMEM;

	err = br_fdb_hash_init(br);
	if (err) {
		free_percpu(br->stats);
		return err;
	}

	err = br_mdb_hash_init(br);
	if (err) {
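		/* mdb hash init failed: undo the per-CPU stats and fdb hash set up above */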
		free_percpu(br->stats);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_vlan_init(br);
	if (err) {
		free_percpu(br->stats);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		free_percpu(br->stats);
		br_vlan_flush(br);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
	}

	return err;
}

static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
	free_percpu(br->stats);
}

static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}

static void br_dev_set_multicast_list(struct net_device *dev)
{
}

static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}

static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}

static void br_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *stats)
{
	struct net_bridge *br = netdev_priv(dev);
	struct pcpu_sw_netstats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct pcpu_sw_netstats *bstats
			= per_cpu_ptr(br->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&bstats->syncp);
			memcpy(&tmp, bstats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
		sum.tx_bytes += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;
}

static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}

/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id().
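		 * That call also rewrites the STP bridge ID, which is why br->lock is held.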
		 */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}

static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bridge", sizeof(info->driver));
	strlcpy(info->version, BR_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static netdev_features_t br_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}

static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}

int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}

static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free(np);
}

#endif

static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev, extack);
}

static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo = br_getinfo,
	.get_link = ethtool_op_get_link,
};

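/* Operations of the bridge master device itself; bridge ports are attached
 * and detached through ndo_add_slave()/ndo_del_slave().
 */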
static const struct net_device_ops br_netdev_ops = {
	.ndo_open = br_dev_open,
	.ndo_stop = br_dev_stop,
	.ndo_init = br_dev_init,
	.ndo_uninit = br_dev_uninit,
	.ndo_start_xmit = br_dev_xmit,
	.ndo_get_stats64 = br_get_stats64,
	.ndo_set_mac_address = br_set_mac_address,
	.ndo_set_rx_mode = br_dev_set_multicast_list,
	.ndo_change_rx_flags = br_dev_change_rx_flags,
	.ndo_change_mtu = br_change_mtu,
	.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup = br_netpoll_setup,
	.ndo_netpoll_cleanup = br_netpoll_cleanup,
	.ndo_poll_controller = br_poll_controller,
#endif
	.ndo_add_slave = br_add_slave,
	.ndo_del_slave = br_del_slave,
	.ndo_fix_features = br_fix_features,
	.ndo_fdb_add = br_fdb_add,
	.ndo_fdb_del = br_fdb_delete,
	.ndo_fdb_dump = br_fdb_dump,
	.ndo_fdb_get = br_fdb_get,
	.ndo_bridge_getlink = br_getlink,
	.ndo_bridge_setlink = br_setlink,
	.ndo_bridge_dellink = br_dellink,
	.ndo_features_check = passthru_features_check,
};

static struct device_type br_type = {
	.name = "bridge",
};

void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}