xref: /openbmc/linux/net/bridge/br_forward.c (revision 22fc4c4c9fd60427bcda00878cee94e7622cfa7a)
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets back to the originating port (unless hairpin mode
 * is enabled) or out of ports that are not in the forwarding state; also
 * honour the VLAN, switchdev and port isolation egress checks.
 */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING &&
		nbp_switchdev_allowed_egress(p, skb) &&
		!br_skb_isolated(p, skb);
}

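/* Final transmit step: restore the Ethernet header and hand the frame to
 * the egress device via dev_queue_xmit(). Frames the device cannot take,
 * and VLAN frames whose checksum-offload setup cannot locate the network
 * header, are dropped. The skb is always consumed and 0 is returned.
 */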
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	skb_push(skb, ETH_HLEN);
	br_drop_fake_rtable(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD))) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

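/* Run the NF_BR_POST_ROUTING hook before the final transmit; rules on that
 * hook may still drop or mangle the frame. The skb timestamp is cleared
 * here so a leftover receive timestamp is not treated as a scheduled
 * transmit time by the egress path.
 */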
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->tstamp = 0;
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

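/* Deliver one skb out of a single port. Applies the egress VLAN policy,
 * then runs either the NF_BR_FORWARD hook (bridged traffic) or the
 * NF_BR_LOCAL_OUT hook (locally originated traffic) before queueing the
 * frame for transmit. Locally originated frames sent while netpoll is
 * active bypass the hooks and go straight to the netpoll transmit path.
 */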
static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	if (!local_orig) {
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}
		br_hook = NF_BR_FORWARD;
		skb_forward_csum(skb);
		net = dev_net(indev);
	} else {
		if (unlikely(netpoll_tx_running(to->br->dev))) {
			if (!is_skb_forwardable(skb->dev, skb)) {
				kfree_skb(skb);
			} else {
				skb_push(skb, ETH_HLEN);
				br_netpoll_send_skb(to, skb);
			}
			return;
		}
		br_hook = NF_BR_LOCAL_OUT;
		net = dev_net(skb->dev);
		indev = NULL;
	}

	NF_HOOK(NFPROTO_BRIDGE, br_hook,
		net, NULL, skb, indev, skb->dev,
		br_forward_finish);
}

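/* Clone the skb and forward the clone out of @prev, leaving the original
 * untouched for the caller (e.g. for local delivery or further flooding).
 * Accounts a tx_dropped on the bridge device if the clone fails.
 */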
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__br_forward(prev, skb, local_orig);
	return 0;
}

/**
 * br_forward - forward a packet to a specific port
 * @to: destination port
 * @skb: packet being forwarded
 * @local_rcv: packet will be received locally after forwarding
 * @local_orig: packet is locally originated
 *
 * Should be called with rcu_read_lock.
 */
void br_forward(const struct net_bridge_port *to,
		struct sk_buff *skb, bool local_rcv, bool local_orig)
{
	if (unlikely(!to))
		goto out;

	/* redirect to backup link if the destination port is down */
	if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
		struct net_bridge_port *backup_port;

		backup_port = rcu_dereference(to->backup_port);
		if (unlikely(!backup_port))
			goto out;
		to = backup_port;
	}

	if (should_deliver(to, skb)) {
		if (local_rcv)
			deliver_clone(to, skb, local_orig);
		else
			__br_forward(to, skb, local_orig);
		return;
	}

out:
	if (!local_rcv)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);

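/* Flooding helper: defer transmission by one port so the last matching port
 * can be handed the original skb instead of a clone. If @p should receive
 * the frame, the previously remembered port (if any) gets a clone now and
 * @p becomes the new pending port; on clone failure an ERR_PTR is returned.
 */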
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb, bool local_orig)
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, local_orig);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}

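/* Flood a frame out of every bridge port on which the relevant flooding
 * flag (BR_FLOOD, BR_MCAST_FLOOD or BR_BCAST_FLOOD) permits it, skipping
 * proxy-ARP ports and honouring should_deliver(). The last eligible port
 * receives the original skb unless @local_rcv asks to keep it.
 */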
/* called under rcu_read_lock */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
{
	u8 igmp_type = br_multicast_igmp_type(skb);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port *p;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off, nor
		 * multicast or broadcast traffic when the corresponding
		 * flooding flag is off, except for traffic we originate
		 * ourselves
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if ((p->flags & (BR_PROXYARP_WIFI | BR_NEIGH_SUPPRESS)) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;

		prev = maybe_deliver(prev, p, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
		if (prev == p)
			br_multicast_count(p->br, p, skb, igmp_type,
					   BR_MCAST_DIR_TX);
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
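/* Multicast-to-unicast helper: copy the frame, rewrite the destination MAC
 * to @addr (unless it is the broadcast address) and forward the copy out of
 * @p. The originating port never gets a copy of its own frame back, even in
 * hairpin mode, so IPv6 duplicate address detection keeps working.
 */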
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
			       const unsigned char *addr, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	const unsigned char *src = eth_hdr(skb)->h_source;

	if (!should_deliver(p, skb))
		return;

	/* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
	if (skb->dev == p->dev && ether_addr_equal(src, addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return;
	}

	if (!is_broadcast_ether_addr(addr))
		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);

	__br_forward(p, skb, local_orig);
}

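/* Flood a multicast frame to every port that subscribed to the group in
 * @mdst and to every detected multicast router port. The two RCU lists are
 * walked in parallel and merged by port pointer value, so a port appearing
 * on both lists receives only a single copy.
 */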
/* called with rcu_read_lock */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			struct sk_buff *skb,
			bool local_rcv, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	u8 igmp_type = br_multicast_igmp_type(skb);
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = hlist_entry_safe(rp, struct net_bridge_port, rlist);

		if ((unsigned long)lport > (unsigned long)rport) {
			port = lport;

			if (port->flags & BR_MULTICAST_TO_UNICAST) {
				maybe_deliver_addr(lport, skb, p->eth_addr,
						   local_orig);
				goto delivered;
			}
		} else {
			port = rport;
		}

		prev = maybe_deliver(prev, port, skb, local_orig);
delivered:
		if (IS_ERR(prev))
			goto out;
		if (prev == port)
			br_multicast_count(port->br, port, skb, igmp_type,
					   BR_MCAST_DIR_TX);

		/* advance whichever list(s) supplied the port just handled */
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
#endif