/*
 *	Handle firewalling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/rculist.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <net/netns/generic.h>

#include <linux/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

static unsigned int brnf_net_id __read_mostly;

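/* Per-netns state: tracks whether the bridge netfilter hooks below have
 * already been registered for this network namespace.
 */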
struct brnf_net {
	bool enabled;
};

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly;
static int brnf_filter_pppoe_tagged __read_mostly;
static int brnf_pass_vlan_indev __read_mostly;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#define brnf_pass_vlan_indev 0
#endif

#define IS_IP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))

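/* Encapsulated protocol of a VLAN frame, whether the tag has already been
 * stripped into skb metadata or is still present in the packet data;
 * returns 0 for non-VLAN frames.
 */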
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}

#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) &&	\
	 brnf_filter_vlan_tagged)

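/* PPP protocol field that immediately follows the PPPoE session header. */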
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)

/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)

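/* Per-CPU scratch area used to stash the L2 (and VLAN) header while a
 * packet is being refragmented; see br_nf_dev_queue_xmit() and
 * br_nf_push_frag_xmit().
 */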
struct brnf_frag_data {
	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
	u8 encap_size;
	u8 size;
	u16 vlan_tci;
	__be16 vlan_proto;
};

static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);

static void nf_bridge_info_free(struct sk_buff *skb)
{
	if (skb->nf_bridge) {
		nf_bridge_put(skb->nf_bridge);
		skb->nf_bridge = NULL;
	}
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}

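/* Ensure the skb owns a private copy of its nf_bridge_info before it is
 * modified; returns NULL if allocating the copy fails.
 */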
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (refcount_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			refcount_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}
	return nf_bridge;
}

unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __cpu_to_be16(ETH_P_8021Q):
		return VLAN_HLEN;
	case __cpu_to_be16(ETH_P_PPP_SES):
		return PPPOE_SES_HLEN;
	default:
		return 0;
	}
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}

/* When handing a packet over to the IP layer, check
 * whether the skb is in the format the IP stack expects.
 */

static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	/* We should really parse IP options here but until
	 * somebody who actually uses IP options complains to
	 * us we'll just silently ignore the options because
	 * we're lazy!
	 */
	return 0;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}

void nf_bridge_update_protocol(struct sk_buff *skb)
{
	switch (skb->nf_bridge->orig_proto) {
	case BRNF_PROTO_8021Q:
		skb->protocol = htons(ETH_P_8021Q);
		break;
	case BRNF_PROTO_PPPOE:
		skb->protocol = htons(ETH_P_PPP_SES);
		break;
	case BRNF_PROTO_UNCHANGED:
		break;
	}
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct neighbour *neigh;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
		int ret;

		if (neigh->hh.hh_len) {
			neigh_hh_bridge(&neigh->hh, skb);
			skb->dev = nf_bridge->physindev;
			ret = br_handle_frame_finish(net, sk, skb);
		} else {
			/* the neighbour function below overwrites the complete
			 * MAC header, so we save the Ethernet source address and
			 * protocol number.
			 */
			skb_copy_from_linear_data_offset(skb,
							 -(ETH_HLEN-ETH_ALEN),
							 nf_bridge->neigh_header,
							 ETH_HLEN-ETH_ALEN);
			/* tell br_dev_xmit to continue with forwarding */
			nf_bridge->bridged_dnat = 1;
			/* FIXME Need to refragment */
			ret = neigh->output(neigh, skb);
		}
		neigh_release(neigh);
		return ret;
	}
free_skb:
	kfree_skb(skb);
	return 0;
}

static inline bool
br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
			     const struct nf_bridge_info *nf_bridge)
{
	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 * This is also true when SNAT takes place (for the reply direction).
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge().
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the loopback device as the
 * output device, which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge device, or if ip_route_output_key() fails, we drop the packet.
 */
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;
	int err;

	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->in_prerouting = 0;
	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due either
			 * to a martian destination or to forwarding being
			 * disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for two types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(net, iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
						  net, sk, skb, skb->dev,
						  NULL,
						  br_nf_pre_routing_finish_bridge);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
			  br_handle_frame_finish);
	return 0;
}

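/* Device the IP stack should see as the input device: the bridge itself,
 * or the VLAN device on top of it when bridge-nf-pass-vlan-input-dev is
 * enabled and the frame carries a VLAN tag.
 */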
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
{
	struct net_device *vlan, *br;

	br = bridge_parent(dev);
	if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
				    skb_vlan_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}

/* Some common code for IPv4/IPv6 */
struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge->in_prerouting = 1;
	nf_bridge->physindev = skb->dev;
	skb->dev = brnf_get_logical_dev(skb, skb->dev);

	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->orig_proto = BRNF_PROTO_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->orig_proto = BRNF_PROTO_PPPOE;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);
	return skb->dev;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular.  Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(state->in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(priv, skb, state);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_validate_ipv4(state->net, skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;

	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
		skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}


/* PF_BRIDGE/FORWARD *************************************************/
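/* Called as the okfn of the IP/IPv6/ARP FORWARD hook: restore the
 * bridge-level view of the skb (frag_max_size, pkt_type, protocol, encap
 * header, input device) and re-enter the bridge FORWARD chain above our
 * own priority, finishing with br_forward_finish().
 */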
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *in;

	if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {

		if (skb->protocol == htons(ETH_P_IP))
			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

		if (skb->protocol == htons(ETH_P_IPV6))
			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;

		in = nf_bridge->physindev;
		if (nf_bridge->pkt_otherhost) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->pkt_otherhost = false;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
			  br_forward_finish);
	return 0;
}


/* This is the 'purely bridged' case.  For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(void *priv,
				     struct sk_buff *skb,
				     const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_DROP;

	parent = bridge_parent(state->out);
	if (!parent)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	if (pf == NFPROTO_IPV4) {
		if (br_validate_ipv4(state->net, skb))
			return NF_DROP;
		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	if (pf == NFPROTO_IPV6) {
		if (br_validate_ipv6(state->net, skb))
			return NF_DROP;
		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	nf_bridge->physoutdev = skb->dev;
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
		brnf_get_logical_dev(skb, state->in),
		parent,	br_nf_forward_finish);

	return NF_STOLEN;
}

static unsigned int br_nf_forward_arp(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(state->out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (!IS_ARP(skb)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = state->in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
		state->in, state->out, br_nf_forward_finish);

	return NF_STOLEN;
}

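/* okfn used while refragmenting: restore the saved Ethernet/encap header
 * (and VLAN tag, if any) in front of each fragment, then hand it to
 * br_dev_queue_push_xmit().
 */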
static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	if (data->vlan_tci) {
		skb->vlan_tci = data->vlan_tci;
		skb->vlan_proto = data->vlan_proto;
	}

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
}

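/* Refuse to fragment when DF is set (or when the recorded incoming fragment
 * size already exceeds the MTU); otherwise let ip_do_fragment() split the
 * skb and invoke 'output' for every fragment.
 */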
static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		  int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
	struct iphdr *iph = ip_hdr(skb);

	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

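/* PPPoE encapsulation is re-added after POST_ROUTING, so account for its
 * header when comparing the packet length against the device MTU.
 */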
static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
	if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
		return PPPOE_SES_HLEN;
	return 0;
}

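/* Final transmit step on the bridge port: if the (possibly defragmented)
 * packet still fits the MTU, send it as-is; otherwise stash the L2 header
 * in per-CPU storage and refragment via br_nf_push_frag_xmit().
 */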
static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	unsigned int mtu, mtu_reserved;

	mtu_reserved = nf_bridge_mtu_reduction(skb);
	mtu = skb->dev->mtu;

	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
		mtu = nf_bridge->frag_max_size;

	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
		nf_bridge_info_free(skb);
		return br_dev_queue_push_xmit(net, sk, skb);
	}

	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
	    skb->protocol == htons(ETH_P_IP)) {
		struct brnf_frag_data *data;

		if (br_validate_ipv4(net, skb))
			goto drop;

		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;

		nf_bridge_update_protocol(skb);

		data = this_cpu_ptr(&brnf_frag_data_storage);

		data->vlan_tci = skb->vlan_tci;
		data->vlan_proto = skb->vlan_proto;
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
	}
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
	    skb->protocol == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		struct brnf_frag_data *data;

		if (br_validate_ipv6(net, skb))
			goto drop;

		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;

		nf_bridge_update_protocol(skb);

		data = this_cpu_ptr(&brnf_frag_data_storage);
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		if (v6ops)
			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);

		kfree_skb(skb);
		return -EMSGSIZE;
	}
	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
 drop:
	kfree_skb(skb);
	return 0;
}

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(void *priv,
				       struct sk_buff *skb,
				       const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	/* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
	 * on a bridge, but was delivered locally and is now being routed:
	 *
	 * POST_ROUTING was already invoked from the ip stack.
	 */
	if (!nf_bridge || !nf_bridge->physoutdev)
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge_pull_encap_header(skb);
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
		NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(void *priv,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
		state->okfn(state->net, state->sk, skb);
		return NF_STOLEN;
	}

	return NF_ACCEPT;
}

/* This is called when br_netfilter has called into iptables/netfilter,
 * and DNAT has taken place on a bridge-forwarded packet.
 *
 * neigh->output has created a new MAC header, with local br0 MAC
 * as saddr.
 *
 * This restores the original MAC saddr of the bridged packet
 * before invoking bridge forward logic to transmit the packet.
 */
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	skb_pull(skb, ETH_HLEN);
	nf_bridge->bridged_dnat = 0;

	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));

	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
				       nf_bridge->neigh_header,
				       ETH_HLEN - ETH_ALEN);
	skb->dev = nf_bridge->physindev;

	nf_bridge->physoutdev = NULL;
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
}

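/* Hooked into br_dev_xmit(): finish the deferred bridged-DNAT case here.
 * Returns 1 if the skb was consumed, 0 to let normal bridge xmit proceed.
 */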
static int br_nf_dev_xmit(struct sk_buff *skb)
{
	if (skb->nf_bridge && skb->nf_bridge->bridged_dnat) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		return 1;
	}
	return 0;
}

static const struct nf_br_ops br_ops = {
	.br_dev_xmit_hook =	br_nf_dev_xmit,
};

void br_netfilter_enable(void)
{
}
EXPORT_SYMBOL_GPL(br_netfilter_enable);

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static const struct nf_hook_ops br_nf_ops[] = {
	{
		.hook = br_nf_pre_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};

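/* Register the hooks above in a netns the first time a bridge device is
 * created there; they stay registered until the netns exits (see
 * brnf_exit_net()).
 */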
static int brnf_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct brnf_net *brnet;
	struct net *net;
	int ret;

	if (event != NETDEV_REGISTER || !(dev->priv_flags & IFF_EBRIDGE))
		return NOTIFY_DONE;

	ASSERT_RTNL();

	net = dev_net(dev);
	brnet = net_generic(net, brnf_net_id);
	if (brnet->enabled)
		return NOTIFY_OK;

	ret = nf_register_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret)
		return NOTIFY_BAD;

	brnet->enabled = true;
	return NOTIFY_OK;
}

static void __net_exit brnf_exit_net(struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	if (!brnet->enabled)
		return;

	nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
	brnet->enabled = false;
}

static struct pernet_operations brnf_net_ops __read_mostly = {
	.exit = brnf_exit_net,
	.id   = &brnf_net_id,
	.size = sizeof(struct brnf_net),
};

static struct notifier_block brnf_notifier __read_mostly = {
	.notifier_call = brnf_device_event,
};

/* recursively invokes nf_hook_slow (again), skipping already-called
 * hooks (priority <= NF_BR_PRI_BRNF).
 *
 * Called with rcu read lock held.
 */
int br_nf_hook_thresh(unsigned int hook, struct net *net,
		      struct sock *sk, struct sk_buff *skb,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct net *, struct sock *,
				  struct sk_buff *))
{
	const struct nf_hook_entries *e;
	struct nf_hook_state state;
	struct nf_hook_ops **ops;
	unsigned int i;
	int ret;

	e = rcu_dereference(net->nf.hooks_bridge[hook]);
	if (!e)
		return okfn(net, sk, skb);

	ops = nf_hook_entries_get_hook_ops(e);
	for (i = 0; i < e->num_hook_entries &&
	      ops[i]->priority <= NF_BR_PRI_BRNF; i++)
		;

	nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
			   sk, net, okfn);

	ret = nf_hook_slow(skb, &state, e, i);
	if (ret == 1)
		ret = okfn(net, sk, skb);

	return ret;
}

#ifdef CONFIG_SYSCTL
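/* Like proc_dointvec(), but any non-zero value written is normalized to 1. */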
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}

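/* These knobs are registered under "net/bridge", so they show up as
 * /proc/sys/net/bridge/bridge-nf-call-iptables and friends, e.g.:
 *
 *	echo 0 > /proc/sys/net/bridge/bridge-nf-call-iptables
 */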
static struct ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.data		= &brnf_call_arptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.data		= &brnf_call_iptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.data		= &brnf_call_ip6tables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.data		= &brnf_filter_vlan_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.data		= &brnf_filter_pppoe_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-pass-vlan-input-dev",
		.data		= &brnf_pass_vlan_indev,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};
#endif

static int __init br_netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&brnf_net_ops);
	if (ret < 0)
		return ret;

	ret = register_netdevice_notifier(&brnf_notifier);
	if (ret < 0) {
		unregister_pernet_subsys(&brnf_net_ops);
		return ret;
	}

#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		unregister_netdevice_notifier(&brnf_notifier);
		unregister_pernet_subsys(&brnf_net_ops);
		return -ENOMEM;
	}
#endif
	RCU_INIT_POINTER(nf_br_ops, &br_ops);
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}

static void __exit br_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_br_ops, NULL);
	unregister_netdevice_notifier(&brnf_notifier);
	unregister_pernet_subsys(&brnf_net_ops);
#ifdef CONFIG_SYSCTL
	unregister_net_sysctl_table(brnf_sysctl_header);
#endif
}

module_init(br_netfilter_init);
module_exit(br_netfilter_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lennert Buytenhek <buytenh@gnu.org>");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");