xref: /openbmc/linux/net/bridge/br_netfilter_hooks.c (revision 0f9b4c3ca5fdf3e177266ef994071b1a03f07318)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Handle firewalling
4  *	Linux ethernet bridge
5  *
6  *	Authors:
7  *	Lennert Buytenhek		<buytenh@gnu.org>
8  *	Bart De Schuymer		<bdschuym@pandora.be>
9  *
10  *	Lennert dedicates this file to Kerstin Wurdinger.
11  */
12 
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/ip.h>
17 #include <linux/netdevice.h>
18 #include <linux/skbuff.h>
19 #include <linux/if_arp.h>
20 #include <linux/if_ether.h>
21 #include <linux/if_vlan.h>
22 #include <linux/if_pppox.h>
23 #include <linux/ppp_defs.h>
24 #include <linux/netfilter_bridge.h>
25 #include <uapi/linux/netfilter_bridge.h>
26 #include <linux/netfilter_ipv4.h>
27 #include <linux/netfilter_ipv6.h>
28 #include <linux/netfilter_arp.h>
29 #include <linux/in_route.h>
30 #include <linux/rculist.h>
31 #include <linux/inetdevice.h>
32 
33 #include <net/ip.h>
34 #include <net/ipv6.h>
35 #include <net/addrconf.h>
36 #include <net/dst_metadata.h>
37 #include <net/route.h>
38 #include <net/netfilter/br_netfilter.h>
39 #include <net/netns/generic.h>
40 
41 #include <linux/uaccess.h>
42 #include "br_private.h"
43 #ifdef CONFIG_SYSCTL
44 #include <linux/sysctl.h>
45 #endif
46 
47 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
48 #include <net/netfilter/nf_conntrack_core.h>
49 #endif
50 
51 static unsigned int brnf_net_id __read_mostly;
52 
53 struct brnf_net {
54 	bool enabled;
55 
56 #ifdef CONFIG_SYSCTL
57 	struct ctl_table_header *ctl_hdr;
58 #endif
59 
60 	/* default value is 1 */
61 	int call_iptables;
62 	int call_ip6tables;
63 	int call_arptables;
64 
65 	/* default value is 0 */
66 	int filter_vlan_tagged;
67 	int filter_pppoe_tagged;
68 	int pass_vlan_indev;
69 };
70 
71 #define IS_IP(skb) \
72 	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
73 
74 #define IS_IPV6(skb) \
75 	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
76 
77 #define IS_ARP(skb) \
78 	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
79 
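/* Encapsulated protocol of an 802.1Q frame: taken from skb->protocol when the
 * VLAN tag was stripped by hardware, otherwise read from the in-line VLAN
 * header; 0 for anything else.
 */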
80 static inline __be16 vlan_proto(const struct sk_buff *skb)
81 {
82 	if (skb_vlan_tag_present(skb))
83 		return skb->protocol;
84 	else if (skb->protocol == htons(ETH_P_8021Q))
85 		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
86 	else
87 		return 0;
88 }
89 
90 static inline bool is_vlan_ip(const struct sk_buff *skb, const struct net *net)
91 {
92 	struct brnf_net *brnet = net_generic(net, brnf_net_id);
93 
94 	return vlan_proto(skb) == htons(ETH_P_IP) && brnet->filter_vlan_tagged;
95 }
96 
97 static inline bool is_vlan_ipv6(const struct sk_buff *skb,
98 				const struct net *net)
99 {
100 	struct brnf_net *brnet = net_generic(net, brnf_net_id);
101 
102 	return vlan_proto(skb) == htons(ETH_P_IPV6) &&
103 	       brnet->filter_vlan_tagged;
104 }
105 
106 static inline bool is_vlan_arp(const struct sk_buff *skb, const struct net *net)
107 {
108 	struct brnf_net *brnet = net_generic(net, brnf_net_id);
109 
110 	return vlan_proto(skb) == htons(ETH_P_ARP) && brnet->filter_vlan_tagged;
111 }
112 
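/* PPP protocol field immediately following the PPPoE session header; only
 * meaningful when skb->protocol is ETH_P_PPP_SES.
 */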
113 static inline __be16 pppoe_proto(const struct sk_buff *skb)
114 {
115 	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
116 			    sizeof(struct pppoe_hdr)));
117 }
118 
119 static inline bool is_pppoe_ip(const struct sk_buff *skb, const struct net *net)
120 {
121 	struct brnf_net *brnet = net_generic(net, brnf_net_id);
122 
123 	return skb->protocol == htons(ETH_P_PPP_SES) &&
124 	       pppoe_proto(skb) == htons(PPP_IP) && brnet->filter_pppoe_tagged;
125 }
126 
127 static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
128 				 const struct net *net)
129 {
130 	struct brnf_net *brnet = net_generic(net, brnf_net_id);
131 
132 	return skb->protocol == htons(ETH_P_PPP_SES) &&
133 	       pppoe_proto(skb) == htons(PPP_IPV6) &&
134 	       brnet->filter_pppoe_tagged;
135 }
136 
137 /* largest possible L2 header, see br_nf_dev_queue_xmit() */
138 #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
139 
140 struct brnf_frag_data {
141 	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
142 	u8 encap_size;
143 	u8 size;
144 	u16 vlan_tci;
145 	__be16 vlan_proto;
146 };
147 
148 static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
149 
150 static void nf_bridge_info_free(struct sk_buff *skb)
151 {
152 	skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
153 }
154 
155 static inline struct net_device *bridge_parent(const struct net_device *dev)
156 {
157 	struct net_bridge_port *port;
158 
159 	port = br_port_get_rcu(dev);
160 	return port ? port->br->dev : NULL;
161 }
162 
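/* Ensure this skb has its own writable bridge-netfilter extension;
 * skb_ext_add() duplicates a shared extension area if necessary.
 */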
163 static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
164 {
165 	return skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
166 }
167 
168 unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
169 {
170 	switch (skb->protocol) {
171 	case __cpu_to_be16(ETH_P_8021Q):
172 		return VLAN_HLEN;
173 	case __cpu_to_be16(ETH_P_PPP_SES):
174 		return PPPOE_SES_HLEN;
175 	default:
176 		return 0;
177 	}
178 }
179 
180 static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
181 {
182 	unsigned int len = nf_bridge_encap_header_len(skb);
183 
184 	skb_pull(skb, len);
185 	skb->network_header += len;
186 }
187 
188 static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
189 {
190 	unsigned int len = nf_bridge_encap_header_len(skb);
191 
192 	skb_pull_rcsum(skb, len);
193 	skb->network_header += len;
194 }
195 
196 /* When handing a packet over to the IP layer
197  * check whether we have a skb that is in the
198  * expected format
199  */
200 
201 static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
202 {
203 	const struct iphdr *iph;
204 	u32 len;
205 
206 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
207 		goto inhdr_error;
208 
209 	iph = ip_hdr(skb);
210 
211 	/* Basic sanity checks */
212 	if (iph->ihl < 5 || iph->version != 4)
213 		goto inhdr_error;
214 
215 	if (!pskb_may_pull(skb, iph->ihl*4))
216 		goto inhdr_error;
217 
218 	iph = ip_hdr(skb);
219 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
220 		goto csum_error;
221 
222 	len = skb_ip_totlen(skb);
223 	if (skb->len < len) {
224 		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
225 		goto drop;
226 	} else if (len < (iph->ihl*4))
227 		goto inhdr_error;
228 
229 	if (pskb_trim_rcsum(skb, len)) {
230 		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
231 		goto drop;
232 	}
233 
234 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
235 	/* We should really parse IP options here but until
236 	 * somebody who actually uses IP options complains to
237 	 * us we'll just silently ignore the options because
238 	 * we're lazy!
239 	 */
240 	return 0;
241 
242 csum_error:
243 	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
244 inhdr_error:
245 	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
246 drop:
247 	return -1;
248 }
249 
250 void nf_bridge_update_protocol(struct sk_buff *skb)
251 {
252 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
253 
254 	switch (nf_bridge->orig_proto) {
255 	case BRNF_PROTO_8021Q:
256 		skb->protocol = htons(ETH_P_8021Q);
257 		break;
258 	case BRNF_PROTO_PPPOE:
259 		skb->protocol = htons(ETH_P_PPP_SES);
260 		break;
261 	case BRNF_PROTO_UNCHANGED:
262 		break;
263 	}
264 }
265 
266 /* Obtain the correct destination MAC address, while preserving the original
267  * source MAC address. If we already know this address, we just copy it. If we
268  * don't, we use the neighbour framework to find out. In both cases, we make
269  * sure that br_handle_frame_finish() is called afterwards.
270  */
271 int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
272 {
273 	struct neighbour *neigh;
274 	struct dst_entry *dst;
275 
276 	skb->dev = bridge_parent(skb->dev);
277 	if (!skb->dev)
278 		goto free_skb;
279 	dst = skb_dst(skb);
280 	neigh = dst_neigh_lookup_skb(dst, skb);
281 	if (neigh) {
282 		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
283 		int ret;
284 
285 		if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
286 		    READ_ONCE(neigh->hh.hh_len)) {
287 			struct net_device *br_indev;
288 
289 			br_indev = nf_bridge_get_physindev(skb, net);
290 			if (!br_indev) {
291 				neigh_release(neigh);
292 				goto free_skb;
293 			}
294 
295 			neigh_hh_bridge(&neigh->hh, skb);
296 			skb->dev = br_indev;
297 
298 			ret = br_handle_frame_finish(net, sk, skb);
299 		} else {
300 			/* the neighbour function below overwrites the complete
301 			 * MAC header, so we save the Ethernet source address and
302 			 * protocol number.
303 			 */
304 			skb_copy_from_linear_data_offset(skb,
305 							 -(ETH_HLEN-ETH_ALEN),
306 							 nf_bridge->neigh_header,
307 							 ETH_HLEN-ETH_ALEN);
308 			/* tell br_dev_xmit to continue with forwarding */
309 			nf_bridge->bridged_dnat = 1;
310 			/* FIXME Need to refragment */
311 			ret = READ_ONCE(neigh->output)(neigh, skb);
312 		}
313 		neigh_release(neigh);
314 		return ret;
315 	}
316 free_skb:
317 	kfree_skb(skb);
318 	return 0;
319 }
320 
321 static inline bool
322 br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
323 			     const struct nf_bridge_info *nf_bridge)
324 {
325 	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
326 }
327 
328 /* This requires some explaining. If DNAT has taken place,
329  * we will need to fix up the destination Ethernet address.
330  * This is also true when SNAT takes place (for the reply direction).
331  *
332  * There are two cases to consider:
333  * 1. The packet was DNAT'ed to a device in the same bridge
334  *    port group as it was received on. We can still bridge
335  *    the packet.
336  * 2. The packet was DNAT'ed to a different device, either
337  *    a non-bridged device or another bridge port group.
338  *    The packet will need to be routed.
339  *
340  * The correct way of distinguishing between these two cases is to
341  * call ip_route_input() and to look at skb->dst->dev, which is
342  * changed to the destination device if ip_route_input() succeeds.
343  *
344  * Let's first consider the case that ip_route_input() succeeds:
345  *
346  * If the output device equals the logical bridge device the packet
347  * came in on, we can consider this bridging. The corresponding MAC
348  * address will be obtained in br_nf_pre_routing_finish_bridge.
349  * Otherwise, the packet is considered to be routed and we just
350  * change the destination MAC address so that the packet will
351  * later be passed up to the IP stack to be routed. For a redirected
352  * packet, ip_route_input() will give back the localhost as output device,
353  * which differs from the bridge device.
354  *
355  * Let's now consider the case that ip_route_input() fails:
356  *
357  * This can be because the destination address is martian, in which case
358  * the packet will be dropped.
359  * If IP forwarding is disabled, ip_route_input() will fail, while
360  * ip_route_output_key() can return success. The source
361  * address for ip_route_output_key() is set to zero, so ip_route_output_key()
362  * thinks we're handling a locally generated packet and won't care
363  * if IP forwarding is enabled. If the output device equals the logical bridge
364  * device, we proceed as if ip_route_input() succeeded. If it differs from the
365  * logical bridge port or if ip_route_output_key() fails we drop the packet.
366  */
367 static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
368 {
369 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
370 	struct net_device *dev = skb->dev, *br_indev;
371 	const struct iphdr *iph = ip_hdr(skb);
372 	struct rtable *rt;
373 	int err;
374 
375 	br_indev = nf_bridge_get_physindev(skb, net);
376 	if (!br_indev) {
377 		kfree_skb(skb);
378 		return 0;
379 	}
380 
381 	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
382 
383 	if (nf_bridge->pkt_otherhost) {
384 		skb->pkt_type = PACKET_OTHERHOST;
385 		nf_bridge->pkt_otherhost = false;
386 	}
387 	nf_bridge->in_prerouting = 0;
388 	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
389 		err = ip_route_input(skb, iph->daddr, iph->saddr,
390 				     ip4h_dscp(iph), dev);
391 		if (err) {
392 			struct in_device *in_dev = __in_dev_get_rcu(dev);
393 
394 			/* If err equals -EHOSTUNREACH the error is due to a
395 			 * martian destination or due to the fact that
396 			 * forwarding is disabled. For most martian packets,
397 			 * ip_route_output_key() will fail. It won't fail for 2 types of
398 			 * martian destinations: loopback destinations and destination
399 			 * 0.0.0.0. In both cases the packet will be dropped because the
400 			 * destination is the loopback device and not the bridge. */
401 			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
402 				goto free_skb;
403 
404 			rt = ip_route_output(net, iph->daddr, 0,
405 					     RT_TOS(iph->tos), 0);
406 			if (!IS_ERR(rt)) {
407 				/* - Bridged-and-DNAT'ed traffic doesn't
408 				 *   require ip_forwarding. */
409 				if (rt->dst.dev == dev) {
410 					skb_dst_drop(skb);
411 					skb_dst_set(skb, &rt->dst);
412 					goto bridged_dnat;
413 				}
414 				ip_rt_put(rt);
415 			}
416 free_skb:
417 			kfree_skb(skb);
418 			return 0;
419 		} else {
420 			if (skb_dst(skb)->dev == dev) {
421 bridged_dnat:
422 				skb->dev = br_indev;
423 				nf_bridge_update_protocol(skb);
424 				nf_bridge_push_encap_header(skb);
425 				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
426 						  net, sk, skb, skb->dev,
427 						  NULL,
428 						  br_nf_pre_routing_finish_bridge);
429 				return 0;
430 			}
431 			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
432 			skb->pkt_type = PACKET_HOST;
433 		}
434 	} else {
435 		rt = bridge_parent_rtable(br_indev);
436 		if (!rt) {
437 			kfree_skb(skb);
438 			return 0;
439 		}
440 		skb_dst_drop(skb);
441 		skb_dst_set_noref(skb, &rt->dst);
442 	}
443 
444 	skb->dev = br_indev;
445 	nf_bridge_update_protocol(skb);
446 	nf_bridge_push_encap_header(skb);
447 	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
448 			  br_handle_frame_finish);
449 	return 0;
450 }
451 
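/* Logical input device as seen by the IP stack: normally the bridge itself,
 * but with bridge-nf-pass-vlan-input-dev enabled a matching VLAN upper device
 * of the bridge is preferred for VLAN-tagged packets.
 */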
452 static struct net_device *brnf_get_logical_dev(struct sk_buff *skb,
453 					       const struct net_device *dev,
454 					       const struct net *net)
455 {
456 	struct net_device *vlan, *br;
457 	struct brnf_net *brnet = net_generic(net, brnf_net_id);
458 
459 	br = bridge_parent(dev);
460 
461 	if (brnet->pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
462 		return br;
463 
464 	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
465 				    skb_vlan_tag_get(skb) & VLAN_VID_MASK);
466 
467 	return vlan ? vlan : br;
468 }
469 
470 /* Some common code for IPv4/IPv6 */
471 struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
472 {
473 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
474 
475 	if (skb->pkt_type == PACKET_OTHERHOST) {
476 		skb->pkt_type = PACKET_HOST;
477 		nf_bridge->pkt_otherhost = true;
478 	}
479 
480 	nf_bridge->in_prerouting = 1;
481 	nf_bridge->physinif = skb->dev->ifindex;
482 	skb->dev = brnf_get_logical_dev(skb, skb->dev, net);
483 
484 	if (skb->protocol == htons(ETH_P_8021Q))
485 		nf_bridge->orig_proto = BRNF_PROTO_8021Q;
486 	else if (skb->protocol == htons(ETH_P_PPP_SES))
487 		nf_bridge->orig_proto = BRNF_PROTO_PPPOE;
488 
489 	/* Must drop socket now because of tproxy. */
490 	skb_orphan(skb);
491 	return skb->dev;
492 }
493 
494 /* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
495  * Replicate the checks that IPv4 does on packet reception.
496  * Set skb->dev to the bridge device (i.e. parent of the
497  * receiving device) to make netfilter happy, the REDIRECT
498  * target in particular.  Save the original destination IP
499  * address to be able to detect DNAT afterwards. */
500 static unsigned int br_nf_pre_routing(void *priv,
501 				      struct sk_buff *skb,
502 				      const struct nf_hook_state *state)
503 {
504 	struct nf_bridge_info *nf_bridge;
505 	struct net_bridge_port *p;
506 	struct net_bridge *br;
507 	__u32 len = nf_bridge_encap_header_len(skb);
508 	struct brnf_net *brnet;
509 
510 	if (unlikely(!pskb_may_pull(skb, len)))
511 		return NF_DROP;
512 
513 	p = br_port_get_rcu(state->in);
514 	if (p == NULL)
515 		return NF_DROP;
516 	br = p->br;
517 
518 	brnet = net_generic(state->net, brnf_net_id);
519 	if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
520 	    is_pppoe_ipv6(skb, state->net)) {
521 		if (!brnet->call_ip6tables &&
522 		    !br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
523 			return NF_ACCEPT;
524 		if (!ipv6_mod_enabled()) {
525 			pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
526 			return NF_DROP;
527 		}
528 
529 		nf_bridge_pull_encap_header_rcsum(skb);
530 		return br_nf_pre_routing_ipv6(priv, skb, state);
531 	}
532 
533 	if (!brnet->call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES))
534 		return NF_ACCEPT;
535 
536 	if (!IS_IP(skb) && !is_vlan_ip(skb, state->net) &&
537 	    !is_pppoe_ip(skb, state->net))
538 		return NF_ACCEPT;
539 
540 	nf_bridge_pull_encap_header_rcsum(skb);
541 
542 	if (br_validate_ipv4(state->net, skb))
543 		return NF_DROP;
544 
545 	if (!nf_bridge_alloc(skb))
546 		return NF_DROP;
547 	if (!setup_pre_routing(skb, state->net))
548 		return NF_DROP;
549 
550 	nf_bridge = nf_bridge_info_get(skb);
551 	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
552 
553 	skb->protocol = htons(ETH_P_IP);
554 	skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
555 
556 	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
557 		skb->dev, NULL,
558 		br_nf_pre_routing_finish);
559 
560 	return NF_STOLEN;
561 }
562 
563 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
564 /* conntracks' nf_confirm logic cannot handle cloned skbs referencing
565  * the same nf_conn entry, which will happen for multicast (broadcast)
566  * frames on bridges.
567  *
568  * Example:
569  *      macvlan0
570  *      br0
571  *  ethX  ethY
572  *
573  * ethX (or Y) receives multicast or broadcast packet containing
574  * an IP packet, not yet in conntrack table.
575  *
576  * 1. skb passes through bridge and fake-ip (br_netfilter) Prerouting.
577  *    -> skb->_nfct now references an unconfirmed entry
578  * 2. skb is broad/mcast packet. bridge now passes clones out on each bridge
579  *    interface.
580  * 3. skb gets passed up the stack.
581  * 4. In the macvlan case, the macvlan driver retains clone(s) of the mcast skb
582  *    and schedules a work queue to send them out on the lower devices.
583  *
584  *    The clone skb->_nfct is not a copy, it is the same entry as the
585  *    original skb.  The macvlan rx handler then returns RX_HANDLER_PASS.
586  * 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb.
587  *
588  * The macvlan broadcast worker and the normal confirm path will race.
589  *
590  * This race will not happen if step 2 already confirmed a clone. In that
591  * case later steps perform skb_clone() with skb->_nfct already confirmed (in
592  * hash table).  This works fine.
593  *
594  * But such confirmation won't happen when eb/ip/nftables rules dropped the
595  * packets before they reached the nf_confirm step in postrouting.
596  *
597  * Work around this problem by explicit confirmation of the entry at
598  * LOCAL_IN time, before upper layer has a chance to clone the unconfirmed
599  * entry.
600  *
601  */
602 static unsigned int br_nf_local_in(void *priv,
603 				   struct sk_buff *skb,
604 				   const struct nf_hook_state *state)
605 {
606 	bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
607 	struct nf_conntrack *nfct = skb_nfct(skb);
608 	const struct nf_ct_hook *ct_hook;
609 	struct nf_conn *ct;
610 	int ret;
611 
612 	if (promisc) {
613 		nf_reset_ct(skb);
614 		return NF_ACCEPT;
615 	}
616 
617 	if (!nfct || skb->pkt_type == PACKET_HOST)
618 		return NF_ACCEPT;
619 
620 	ct = container_of(nfct, struct nf_conn, ct_general);
621 	if (likely(nf_ct_is_confirmed(ct)))
622 		return NF_ACCEPT;
623 
624 	if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) {
625 		nf_reset_ct(skb);
626 		return NF_ACCEPT;
627 	}
628 
629 	WARN_ON_ONCE(skb_shared(skb));
630 
631 	/* We can't call nf_confirm here, it would create a dependency
632 	 * on nf_conntrack module.
633 	 */
634 	ct_hook = rcu_dereference(nf_ct_hook);
635 	if (!ct_hook) {
636 		skb->_nfct = 0ul;
637 		nf_conntrack_put(nfct);
638 		return NF_ACCEPT;
639 	}
640 
641 	nf_bridge_pull_encap_header(skb);
642 	ret = ct_hook->confirm(skb);
643 	switch (ret & NF_VERDICT_MASK) {
644 	case NF_STOLEN:
645 		return NF_STOLEN;
646 	default:
647 		nf_bridge_push_encap_header(skb);
648 		break;
649 	}
650 
651 	ct = container_of(nfct, struct nf_conn, ct_general);
652 	WARN_ON_ONCE(!nf_ct_is_confirmed(ct));
653 
654 	return ret;
655 }
656 #endif
657 
658 /* PF_BRIDGE/FORWARD *************************************************/
659 static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
660 {
661 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
662 	struct net_device *in;
663 
664 	if (!IS_ARP(skb) && !is_vlan_arp(skb, net)) {
665 
666 		if (skb->protocol == htons(ETH_P_IP))
667 			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
668 
669 		if (skb->protocol == htons(ETH_P_IPV6))
670 			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
671 
672 		in = nf_bridge_get_physindev(skb, net);
673 		if (!in) {
674 			kfree_skb(skb);
675 			return 0;
676 		}
677 		if (nf_bridge->pkt_otherhost) {
678 			skb->pkt_type = PACKET_OTHERHOST;
679 			nf_bridge->pkt_otherhost = false;
680 		}
681 		nf_bridge_update_protocol(skb);
682 	} else {
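		/* ARP: br_nf_forward_arp() stashed the input port in skb->cb */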
683 		in = *((struct net_device **)(skb->cb));
684 	}
685 	nf_bridge_push_encap_header(skb);
686 
687 	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
688 			  br_forward_finish);
689 	return 0;
690 }
691 
692 
693 /* This is the 'purely bridged' case.  For IP, we pass the packet to
694  * netfilter with indev and outdev set to the bridge device,
695  * but we are still able to filter on the 'real' indev/outdev
696  * because of the physdev module. For ARP, indev and outdev are the
697  * bridge ports. */
698 static unsigned int br_nf_forward_ip(void *priv,
699 				     struct sk_buff *skb,
700 				     const struct nf_hook_state *state)
701 {
702 	struct nf_bridge_info *nf_bridge;
703 	struct net_device *parent;
704 	u_int8_t pf;
705 
706 	nf_bridge = nf_bridge_info_get(skb);
707 	if (!nf_bridge)
708 		return NF_ACCEPT;
709 
710 	/* Need exclusive nf_bridge_info since we might have multiple
711 	 * different physoutdevs. */
712 	if (!nf_bridge_unshare(skb))
713 		return NF_DROP;
714 
715 	nf_bridge = nf_bridge_info_get(skb);
716 	if (!nf_bridge)
717 		return NF_DROP;
718 
719 	parent = bridge_parent(state->out);
720 	if (!parent)
721 		return NF_DROP;
722 
723 	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
724 	    is_pppoe_ip(skb, state->net))
725 		pf = NFPROTO_IPV4;
726 	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
727 		 is_pppoe_ipv6(skb, state->net))
728 		pf = NFPROTO_IPV6;
729 	else
730 		return NF_ACCEPT;
731 
732 	nf_bridge_pull_encap_header(skb);
733 
734 	if (skb->pkt_type == PACKET_OTHERHOST) {
735 		skb->pkt_type = PACKET_HOST;
736 		nf_bridge->pkt_otherhost = true;
737 	}
738 
739 	if (pf == NFPROTO_IPV4) {
740 		if (br_validate_ipv4(state->net, skb))
741 			return NF_DROP;
742 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
743 	}
744 
745 	if (pf == NFPROTO_IPV6) {
746 		if (br_validate_ipv6(state->net, skb))
747 			return NF_DROP;
748 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
749 	}
750 
751 	nf_bridge->physoutdev = skb->dev;
752 	if (pf == NFPROTO_IPV4)
753 		skb->protocol = htons(ETH_P_IP);
754 	else
755 		skb->protocol = htons(ETH_P_IPV6);
756 
757 	NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
758 		brnf_get_logical_dev(skb, state->in, state->net),
759 		parent,	br_nf_forward_finish);
760 
761 	return NF_STOLEN;
762 }
763 
764 static unsigned int br_nf_forward_arp(void *priv,
765 				      struct sk_buff *skb,
766 				      const struct nf_hook_state *state)
767 {
768 	struct net_bridge_port *p;
769 	struct net_bridge *br;
770 	struct net_device **d = (struct net_device **)(skb->cb);
771 	struct brnf_net *brnet;
772 
773 	p = br_port_get_rcu(state->out);
774 	if (p == NULL)
775 		return NF_ACCEPT;
776 	br = p->br;
777 
778 	brnet = net_generic(state->net, brnf_net_id);
779 	if (!brnet->call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES))
780 		return NF_ACCEPT;
781 
782 	if (!IS_ARP(skb)) {
783 		if (!is_vlan_arp(skb, state->net))
784 			return NF_ACCEPT;
785 		nf_bridge_pull_encap_header(skb);
786 	}
787 
788 	if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
789 		return NF_DROP;
790 
791 	if (arp_hdr(skb)->ar_pln != 4) {
792 		if (is_vlan_arp(skb, state->net))
793 			nf_bridge_push_encap_header(skb);
794 		return NF_ACCEPT;
795 	}
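	/* remember the input port in skb->cb for br_nf_forward_finish() */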
796 	*d = state->in;
797 	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
798 		state->in, state->out, br_nf_forward_finish);
799 
800 	return NF_STOLEN;
801 }
802 
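/* Output handler for each fragment: restore the saved MAC header (and VLAN
 * tag, if any) in front of the fragment, then hand it to the bridge xmit path.
 */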
803 static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
804 {
805 	struct brnf_frag_data *data;
806 	int err;
807 
808 	data = this_cpu_ptr(&brnf_frag_data_storage);
809 	err = skb_cow_head(skb, data->size);
810 
811 	if (err) {
812 		kfree_skb(skb);
813 		return 0;
814 	}
815 
816 	if (data->vlan_proto)
817 		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);
818 
819 	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
820 	__skb_push(skb, data->encap_size);
821 
822 	nf_bridge_info_free(skb);
823 	return br_dev_queue_push_xmit(net, sk, skb);
824 }
825 
826 static int
827 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
828 		  int (*output)(struct net *, struct sock *, struct sk_buff *))
829 {
830 	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
831 	struct iphdr *iph = ip_hdr(skb);
832 
833 	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
834 		     (IPCB(skb)->frag_max_size &&
835 		      IPCB(skb)->frag_max_size > mtu))) {
836 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
837 		kfree_skb(skb);
838 		return -EMSGSIZE;
839 	}
840 
841 	return ip_do_fragment(net, sk, skb, output);
842 }
843 
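/* Encapsulation overhead that must be counted against the MTU: the PPPoE
 * session header is pushed back onto the packet before transmission.
 */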
844 static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
845 {
846 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
847 
848 	if (nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
849 		return PPPOE_SES_HLEN;
850 	return 0;
851 }
852 
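/* Final transmit step after bridge POST_ROUTING: restore the encapsulation
 * header and, if a non-GSO packet no longer fits the (possibly reduced) MTU,
 * refragment IPv4/IPv6 using the MAC header saved in brnf_frag_data.
 */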
853 static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
854 {
855 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
856 	unsigned int mtu, mtu_reserved;
857 
858 	mtu_reserved = nf_bridge_mtu_reduction(skb);
859 	mtu = skb->dev->mtu;
860 
861 	if (nf_bridge->pkt_otherhost) {
862 		skb->pkt_type = PACKET_OTHERHOST;
863 		nf_bridge->pkt_otherhost = false;
864 	}
865 
866 	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
867 		mtu = nf_bridge->frag_max_size;
868 
869 	nf_bridge_update_protocol(skb);
870 	nf_bridge_push_encap_header(skb);
871 
872 	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
873 		nf_bridge_info_free(skb);
874 		return br_dev_queue_push_xmit(net, sk, skb);
875 	}
876 
877 	/* Fragmentation on metadata/template dst is not supported */
878 	if (unlikely(!skb_valid_dst(skb)))
879 		goto drop;
880 
881 	/* This is wrong! We should preserve the original fragment
882 	 * boundaries by preserving frag_list rather than refragmenting.
883 	 */
884 	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
885 	    skb->protocol == htons(ETH_P_IP)) {
886 		struct brnf_frag_data *data;
887 
888 		if (br_validate_ipv4(net, skb))
889 			goto drop;
890 
891 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
892 
893 		data = this_cpu_ptr(&brnf_frag_data_storage);
894 
895 		if (skb_vlan_tag_present(skb)) {
896 			data->vlan_tci = skb->vlan_tci;
897 			data->vlan_proto = skb->vlan_proto;
898 		} else {
899 			data->vlan_proto = 0;
900 		}
901 
902 		data->encap_size = nf_bridge_encap_header_len(skb);
903 		data->size = ETH_HLEN + data->encap_size;
904 
905 		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
906 						 data->size);
907 
908 		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
909 	}
910 	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
911 	    skb->protocol == htons(ETH_P_IPV6)) {
912 		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
913 		struct brnf_frag_data *data;
914 
915 		if (br_validate_ipv6(net, skb))
916 			goto drop;
917 
918 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
919 
920 		data = this_cpu_ptr(&brnf_frag_data_storage);
921 		data->encap_size = nf_bridge_encap_header_len(skb);
922 		data->size = ETH_HLEN + data->encap_size;
923 
924 		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
925 						 data->size);
926 
927 		if (v6ops)
928 			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
929 
930 		kfree_skb(skb);
931 		return -EMSGSIZE;
932 	}
933 	nf_bridge_info_free(skb);
934 	return br_dev_queue_push_xmit(net, sk, skb);
935  drop:
936 	kfree_skb(skb);
937 	return 0;
938 }
939 
940 /* PF_BRIDGE/POST_ROUTING ********************************************/
941 static unsigned int br_nf_post_routing(void *priv,
942 				       struct sk_buff *skb,
943 				       const struct nf_hook_state *state)
944 {
945 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
946 	struct net_device *realoutdev = bridge_parent(skb->dev);
947 	u_int8_t pf;
948 
949 	/* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
950 	 * on a bridge, but was delivered locally and is now being routed:
951 	 *
952 	 * POST_ROUTING was already invoked from the ip stack.
953 	 */
954 	if (!nf_bridge || !nf_bridge->physoutdev)
955 		return NF_ACCEPT;
956 
957 	if (!realoutdev)
958 		return NF_DROP;
959 
960 	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
961 	    is_pppoe_ip(skb, state->net))
962 		pf = NFPROTO_IPV4;
963 	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
964 		 is_pppoe_ipv6(skb, state->net))
965 		pf = NFPROTO_IPV6;
966 	else
967 		return NF_ACCEPT;
968 
969 	if (skb->pkt_type == PACKET_OTHERHOST) {
970 		skb->pkt_type = PACKET_HOST;
971 		nf_bridge->pkt_otherhost = true;
972 	}
973 
974 	nf_bridge_pull_encap_header(skb);
975 	if (pf == NFPROTO_IPV4)
976 		skb->protocol = htons(ETH_P_IP);
977 	else
978 		skb->protocol = htons(ETH_P_IPV6);
979 
980 	NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
981 		NULL, realoutdev,
982 		br_nf_dev_queue_xmit);
983 
984 	return NF_STOLEN;
985 }
986 
987 /* IP/SABOTAGE *****************************************************/
988 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
989  * for the second time. */
990 static unsigned int ip_sabotage_in(void *priv,
991 				   struct sk_buff *skb,
992 				   const struct nf_hook_state *state)
993 {
994 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
995 
996 	if (nf_bridge) {
997 		if (nf_bridge->sabotage_in_done)
998 			return NF_ACCEPT;
999 
1000 		if (!nf_bridge->in_prerouting &&
1001 		    !netif_is_l3_master(skb->dev) &&
1002 		    !netif_is_l3_slave(skb->dev)) {
1003 			nf_bridge->sabotage_in_done = 1;
1004 			state->okfn(state->net, state->sk, skb);
1005 			return NF_STOLEN;
1006 		}
1007 	}
1008 
1009 	return NF_ACCEPT;
1010 }
1011 
1012 /* This is called when br_netfilter has called into iptables/netfilter,
1013  * and DNAT has taken place on a bridge-forwarded packet.
1014  *
1015  * neigh->output has created a new MAC header, with local br0 MAC
1016  * as saddr.
1017  *
1018  * This restores the original MAC saddr of the bridged packet
1019  * before invoking bridge forward logic to transmit the packet.
1020  */
1021 static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
1022 {
1023 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
1024 	struct net_device *br_indev;
1025 
1026 	br_indev = nf_bridge_get_physindev(skb, dev_net(skb->dev));
1027 	if (!br_indev) {
1028 		kfree_skb(skb);
1029 		return;
1030 	}
1031 
1032 	skb_pull(skb, ETH_HLEN);
1033 	nf_bridge->bridged_dnat = 0;
1034 
1035 	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
1036 
1037 	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
1038 				       nf_bridge->neigh_header,
1039 				       ETH_HLEN - ETH_ALEN);
1040 	skb->dev = br_indev;
1041 
1042 	nf_bridge->physoutdev = NULL;
1043 	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
1044 }
1045 
1046 static int br_nf_dev_xmit(struct sk_buff *skb)
1047 {
1048 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
1049 
1050 	if (nf_bridge && nf_bridge->bridged_dnat) {
1051 		br_nf_pre_routing_finish_bridge_slow(skb);
1052 		return 1;
1053 	}
1054 	return 0;
1055 }
1056 
1057 static const struct nf_br_ops br_ops = {
1058 	.br_dev_xmit_hook =	br_nf_dev_xmit,
1059 };
1060 
1061 /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
1062  * br_dev_queue_push_xmit is called afterwards */
1063 static const struct nf_hook_ops br_nf_ops[] = {
1064 	{
1065 		.hook = br_nf_pre_routing,
1066 		.pf = NFPROTO_BRIDGE,
1067 		.hooknum = NF_BR_PRE_ROUTING,
1068 		.priority = NF_BR_PRI_BRNF,
1069 	},
1070 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1071 	{
1072 		.hook = br_nf_local_in,
1073 		.pf = NFPROTO_BRIDGE,
1074 		.hooknum = NF_BR_LOCAL_IN,
1075 		.priority = NF_BR_PRI_LAST,
1076 	},
1077 #endif
1078 	{
1079 		.hook = br_nf_forward_ip,
1080 		.pf = NFPROTO_BRIDGE,
1081 		.hooknum = NF_BR_FORWARD,
1082 		.priority = NF_BR_PRI_BRNF - 1,
1083 	},
1084 	{
1085 		.hook = br_nf_forward_arp,
1086 		.pf = NFPROTO_BRIDGE,
1087 		.hooknum = NF_BR_FORWARD,
1088 		.priority = NF_BR_PRI_BRNF,
1089 	},
1090 	{
1091 		.hook = br_nf_post_routing,
1092 		.pf = NFPROTO_BRIDGE,
1093 		.hooknum = NF_BR_POST_ROUTING,
1094 		.priority = NF_BR_PRI_LAST,
1095 	},
1096 	{
1097 		.hook = ip_sabotage_in,
1098 		.pf = NFPROTO_IPV4,
1099 		.hooknum = NF_INET_PRE_ROUTING,
1100 		.priority = NF_IP_PRI_FIRST,
1101 	},
1102 	{
1103 		.hook = ip_sabotage_in,
1104 		.pf = NFPROTO_IPV6,
1105 		.hooknum = NF_INET_PRE_ROUTING,
1106 		.priority = NF_IP6_PRI_FIRST,
1107 	},
1108 };
1109 
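/* Register the bridge netfilter hooks in a network namespace the first time a
 * bridge device is created there; they stay registered until the namespace
 * exits (see brnf_exit_net()).
 */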
1110 static int brnf_device_event(struct notifier_block *unused, unsigned long event,
1111 			     void *ptr)
1112 {
1113 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1114 	struct brnf_net *brnet;
1115 	struct net *net;
1116 	int ret;
1117 
1118 	if (event != NETDEV_REGISTER || !netif_is_bridge_master(dev))
1119 		return NOTIFY_DONE;
1120 
1121 	ASSERT_RTNL();
1122 
1123 	net = dev_net(dev);
1124 	brnet = net_generic(net, brnf_net_id);
1125 	if (brnet->enabled)
1126 		return NOTIFY_OK;
1127 
1128 	ret = nf_register_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
1129 	if (ret)
1130 		return NOTIFY_BAD;
1131 
1132 	brnet->enabled = true;
1133 	return NOTIFY_OK;
1134 }
1135 
1136 static struct notifier_block brnf_notifier __read_mostly = {
1137 	.notifier_call = brnf_device_event,
1138 };
1139 
1140 /* recursively invokes nf_hook_slow (again), skipping already-called
1141  * hooks (< NF_BR_PRI_BRNF).
1142  *
1143  * Called with rcu read lock held.
1144  */
1145 int br_nf_hook_thresh(unsigned int hook, struct net *net,
1146 		      struct sock *sk, struct sk_buff *skb,
1147 		      struct net_device *indev,
1148 		      struct net_device *outdev,
1149 		      int (*okfn)(struct net *, struct sock *,
1150 				  struct sk_buff *))
1151 {
1152 	const struct nf_hook_entries *e;
1153 	struct nf_hook_state state;
1154 	struct nf_hook_ops **ops;
1155 	unsigned int i;
1156 	int ret;
1157 
1158 	e = rcu_dereference(net->nf.hooks_bridge[hook]);
1159 	if (!e)
1160 		return okfn(net, sk, skb);
1161 
1162 	ops = nf_hook_entries_get_hook_ops(e);
1163 	for (i = 0; i < e->num_hook_entries; i++) {
1164 		/* These hooks have already been called */
1165 		if (ops[i]->priority < NF_BR_PRI_BRNF)
1166 			continue;
1167 
1168 		/* These hooks have not been called yet, run them. */
1169 		if (ops[i]->priority > NF_BR_PRI_BRNF)
1170 			break;
1171 
1172 		/* take a closer look at NF_BR_PRI_BRNF. */
1173 		if (ops[i]->hook == br_nf_pre_routing) {
1174 			/* This hook diverted the skb to this function,
1175 			 * hooks after this have not been run yet.
1176 			 */
1177 			i++;
1178 			break;
1179 		}
1180 	}
1181 
1182 	nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
1183 			   sk, net, okfn);
1184 
1185 	ret = nf_hook_slow(skb, &state, e, i);
1186 	if (ret == 1)
1187 		ret = okfn(net, sk, skb);
1188 
1189 	return ret;
1190 }
1191 
1192 #ifdef CONFIG_SYSCTL
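/* proc handler shared by all bridge-nf-* sysctls below: any nonzero value
 * written is normalised to 1.
 */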
1193 static
1194 int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
1195 			    void *buffer, size_t *lenp, loff_t *ppos)
1196 {
1197 	int ret;
1198 
1199 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1200 
1201 	if (write && *(int *)(ctl->data))
1202 		*(int *)(ctl->data) = 1;
1203 	return ret;
1204 }
1205 
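/* Registered under "net/bridge" by br_netfilter_sysctl_init_net(), so these
 * appear as /proc/sys/net/bridge/bridge-nf-call-iptables and friends.
 */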
1206 static struct ctl_table brnf_table[] = {
1207 	{
1208 		.procname	= "bridge-nf-call-arptables",
1209 		.maxlen		= sizeof(int),
1210 		.mode		= 0644,
1211 		.proc_handler	= brnf_sysctl_call_tables,
1212 	},
1213 	{
1214 		.procname	= "bridge-nf-call-iptables",
1215 		.maxlen		= sizeof(int),
1216 		.mode		= 0644,
1217 		.proc_handler	= brnf_sysctl_call_tables,
1218 	},
1219 	{
1220 		.procname	= "bridge-nf-call-ip6tables",
1221 		.maxlen		= sizeof(int),
1222 		.mode		= 0644,
1223 		.proc_handler	= brnf_sysctl_call_tables,
1224 	},
1225 	{
1226 		.procname	= "bridge-nf-filter-vlan-tagged",
1227 		.maxlen		= sizeof(int),
1228 		.mode		= 0644,
1229 		.proc_handler	= brnf_sysctl_call_tables,
1230 	},
1231 	{
1232 		.procname	= "bridge-nf-filter-pppoe-tagged",
1233 		.maxlen		= sizeof(int),
1234 		.mode		= 0644,
1235 		.proc_handler	= brnf_sysctl_call_tables,
1236 	},
1237 	{
1238 		.procname	= "bridge-nf-pass-vlan-input-dev",
1239 		.maxlen		= sizeof(int),
1240 		.mode		= 0644,
1241 		.proc_handler	= brnf_sysctl_call_tables,
1242 	},
1243 	{ }
1244 };
1245 
1246 static inline void br_netfilter_sysctl_default(struct brnf_net *brnf)
1247 {
1248 	brnf->call_iptables = 1;
1249 	brnf->call_ip6tables = 1;
1250 	brnf->call_arptables = 1;
1251 	brnf->filter_vlan_tagged = 0;
1252 	brnf->filter_pppoe_tagged = 0;
1253 	brnf->pass_vlan_indev = 0;
1254 }
1255 
1256 static int br_netfilter_sysctl_init_net(struct net *net)
1257 {
1258 	struct ctl_table *table = brnf_table;
1259 	struct brnf_net *brnet;
1260 
1261 	if (!net_eq(net, &init_net)) {
1262 		table = kmemdup(table, sizeof(brnf_table), GFP_KERNEL);
1263 		if (!table)
1264 			return -ENOMEM;
1265 	}
1266 
1267 	brnet = net_generic(net, brnf_net_id);
1268 	table[0].data = &brnet->call_arptables;
1269 	table[1].data = &brnet->call_iptables;
1270 	table[2].data = &brnet->call_ip6tables;
1271 	table[3].data = &brnet->filter_vlan_tagged;
1272 	table[4].data = &brnet->filter_pppoe_tagged;
1273 	table[5].data = &brnet->pass_vlan_indev;
1274 
1275 	br_netfilter_sysctl_default(brnet);
1276 
1277 	brnet->ctl_hdr = register_net_sysctl_sz(net, "net/bridge", table,
1278 						ARRAY_SIZE(brnf_table));
1279 	if (!brnet->ctl_hdr) {
1280 		if (!net_eq(net, &init_net))
1281 			kfree(table);
1282 
1283 		return -ENOMEM;
1284 	}
1285 
1286 	return 0;
1287 }
1288 
1289 static void br_netfilter_sysctl_exit_net(struct net *net,
1290 					 struct brnf_net *brnet)
1291 {
1292 	struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;
1293 
1294 	unregister_net_sysctl_table(brnet->ctl_hdr);
1295 	if (!net_eq(net, &init_net))
1296 		kfree(table);
1297 }
1298 
1299 static int __net_init brnf_init_net(struct net *net)
1300 {
1301 	return br_netfilter_sysctl_init_net(net);
1302 }
1303 #endif
1304 
1305 static void __net_exit brnf_exit_net(struct net *net)
1306 {
1307 	struct brnf_net *brnet;
1308 
1309 	brnet = net_generic(net, brnf_net_id);
1310 	if (brnet->enabled) {
1311 		nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
1312 		brnet->enabled = false;
1313 	}
1314 
1315 #ifdef CONFIG_SYSCTL
1316 	br_netfilter_sysctl_exit_net(net, brnet);
1317 #endif
1318 }
1319 
1320 static struct pernet_operations brnf_net_ops __read_mostly = {
1321 #ifdef CONFIG_SYSCTL
1322 	.init = brnf_init_net,
1323 #endif
1324 	.exit = brnf_exit_net,
1325 	.id   = &brnf_net_id,
1326 	.size = sizeof(struct brnf_net),
1327 };
1328 
1329 static int __init br_netfilter_init(void)
1330 {
1331 	int ret;
1332 
1333 	ret = register_pernet_subsys(&brnf_net_ops);
1334 	if (ret < 0)
1335 		return ret;
1336 
1337 	ret = register_netdevice_notifier(&brnf_notifier);
1338 	if (ret < 0) {
1339 		unregister_pernet_subsys(&brnf_net_ops);
1340 		return ret;
1341 	}
1342 
1343 	RCU_INIT_POINTER(nf_br_ops, &br_ops);
1344 	printk(KERN_NOTICE "Bridge firewalling registered\n");
1345 	return 0;
1346 }
1347 
1348 static void __exit br_netfilter_fini(void)
1349 {
1350 	RCU_INIT_POINTER(nf_br_ops, NULL);
1351 	unregister_netdevice_notifier(&brnf_notifier);
1352 	unregister_pernet_subsys(&brnf_net_ops);
1353 }
1354 
1355 module_init(br_netfilter_init);
1356 module_exit(br_netfilter_fini);
1357 
1358 MODULE_LICENSE("GPL");
1359 MODULE_AUTHOR("Lennert Buytenhek <buytenh@gnu.org>");
1360 MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
1361 MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");
1362