xref: /openbmc/linux/net/bridge/br_device.c (revision 0e17c50f)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
	const struct nf_br_ops *nf_ops;
	u8 state = BR_STATE_FORWARDING;
	const unsigned char *dest;
	struct ethhdr *eth;
	u16 vid = 0;

	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);
	skb_pull(skb, ETH_HLEN);

	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid, &state))
		goto out;

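	/* When neighbour suppression is enabled, ARP/RARP and IPv6 neighbour
	 * discovery packets are handed to the proxy/suppression helpers
	 * before the normal forwarding decision is made.
	 */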
	if (IS_ENABLED(CONFIG_INET) &&
	    (eth->h_proto == htons(ETH_P_ARP) ||
	     eth->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

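	/* Choose the egress path: flood broadcasts, let the multicast code
	 * pick the ports for multicast, and use an FDB lookup for unicast.
	 */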
	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
			goto out;
		}
		if (br_multicast_rcv(br, NULL, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb)))
			br_multicast_flood(mdst, skb, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

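/* ndo_init: allocate the per-CPU stats and the FDB, MDB, VLAN and
 * multicast-stats state when the bridge device is registered, unwinding
 * the earlier allocations on failure.
 */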
static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!br->stats)
		return -ENOMEM;

	err = br_fdb_hash_init(br);
	if (err) {
		free_percpu(br->stats);
		return err;
	}

	err = br_mdb_hash_init(br);
	if (err) {
		free_percpu(br->stats);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_vlan_init(br);
	if (err) {
		free_percpu(br->stats);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		free_percpu(br->stats);
		br_vlan_flush(br);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
	}

	return err;
}

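/* ndo_uninit: release the multicast, VLAN, MDB, FDB and per-CPU stats state */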
static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
	free_percpu(br->stats);
}

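/* ndo_open: refresh features, start the transmit queue and enable the STP
 * and multicast machinery.
 */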
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}

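/* rx-mode (multicast list) changes need no action on the bridge device
 * itself, so this ndo_set_rx_mode hook is intentionally empty.
 */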
static void br_dev_set_multicast_list(struct net_device *dev)
{
}

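/* Propagate promiscuous mode changes on the bridge to its ports via
 * br_manage_promisc().
 */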
static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}

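/* ndo_stop: disable STP and multicast processing and stop the queue */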
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}

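/* Sum the per-CPU software counters into an rtnl_link_stats64; the
 * u64_stats syncp ensures a consistent read of the 64-bit counters.
 */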
static void br_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *stats)
{
	struct net_bridge *br = netdev_priv(dev);
	struct pcpu_sw_netstats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct pcpu_sw_netstats *bstats
			= per_cpu_ptr(br->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&bstats->syncp);
			memcpy(&tmp, bstats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;
}

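/* ndo_change_mtu: new_mtu has already been range-checked against
 * dev->min_mtu/dev->max_mtu by the core before this is called.
 */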
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}

/* Allow setting the MAC address to any valid Ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* dev_set_mac_address() can be called by a master device during the
	 * bridge's NETDEV_UNREGISTER; since the bridge is being destroyed,
	 * do nothing.
	 */
	if (dev->reg_state != NETREG_REGISTERED)
		return -EBUSY;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id(). */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}

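/* ethtool -i: static driver information for the bridge device */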
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bridge", sizeof(info->driver));
	strlcpy(info->version, BR_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

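/* Report the highest link speed among the operationally up ports; duplex
 * and port type are left as unknown.
 */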
static int br_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;

	list_for_each_entry(p, &br->port_list, list) {
		struct ethtool_link_ksettings ecmd;
		struct net_device *pdev = p->dev;

		if (!netif_running(pdev) || !netif_oper_up(pdev))
			continue;

		if (__ethtool_get_link_ksettings(pdev, &ecmd))
			continue;

		if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
			continue;

		if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
		    cmd->base.speed < ecmd.base.speed)
			cmd->base.speed = ecmd.base.speed;
	}

	return 0;
}

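/* ndo_fix_features: let br_features_recompute() trim the requested
 * features to what the current set of bridge ports can support.
 */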
static netdev_features_t br_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}

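/* Allocate and attach a struct netpoll to a bridge port. */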
static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}

int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}

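/* ndo_netpoll_setup: enable netpoll on every current port, rolling back
 * on failure.
 */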
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

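/* Detach and free the port's netpoll state, if any. */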
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free(np);
}

#endif

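/* ndo_add_slave/ndo_del_slave: turn an enslaved device into a bridge port
 * and release it again.
 */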
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev, extack);
}

static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo		 = br_getinfo,
	.get_link		 = ethtool_op_get_link,
	.get_link_ksettings	 = br_get_link_ksettings,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_uninit		 = br_dev_uninit,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = br_get_stats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_do_ioctl		 = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features        = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_fdb_get		 = br_fdb_get,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
	.ndo_features_check	 = passthru_features_check,
};

static struct device_type br_type = {
	.name	= "bridge",
};

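/* Device setup callback run when a bridge is allocated: initialize the
 * net_device fields and the net_bridge private data with the defaults.
 */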
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}