// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Handle firewalling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <uapi/linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/rculist.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/dst_metadata.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <net/netns/generic.h>

#include <linux/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#endif

static unsigned int brnf_net_id __read_mostly;

struct brnf_net {
	bool enabled;

#ifdef CONFIG_SYSCTL
	struct ctl_table_header *ctl_hdr;
#endif

	/* default value is 1 */
	int call_iptables;
	int call_ip6tables;
	int call_arptables;

	/* default value is 0 */
	int filter_vlan_tagged;
	int filter_pppoe_tagged;
	int pass_vlan_indev;
};

#define IS_IP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
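
/* Return the EtherType carried inside a VLAN frame: either the protocol
 * already extracted into the skb (hardware-accelerated tag) or the
 * encapsulated protocol found behind an in-band 802.1Q header; 0 if the
 * frame is not VLAN tagged at all.
 */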
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}

static inline bool is_vlan_ip(const struct sk_buff *skb, const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return vlan_proto(skb) == htons(ETH_P_IP) && brnet->filter_vlan_tagged;
}

static inline bool is_vlan_ipv6(const struct sk_buff *skb,
				const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return vlan_proto(skb) == htons(ETH_P_IPV6) &&
	       brnet->filter_vlan_tagged;
}

static inline bool is_vlan_arp(const struct sk_buff *skb, const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return vlan_proto(skb) == htons(ETH_P_ARP) && brnet->filter_vlan_tagged;
}
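
/* Read the PPP protocol field that immediately follows the PPPoE session
 * header; callers check skb->protocol == htons(ETH_P_PPP_SES) first, so the
 * session header is known to be present.
 */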
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

static inline bool is_pppoe_ip(const struct sk_buff *skb, const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return skb->protocol == htons(ETH_P_PPP_SES) &&
	       pppoe_proto(skb) == htons(PPP_IP) && brnet->filter_pppoe_tagged;
}

static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
				 const struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	return skb->protocol == htons(ETH_P_PPP_SES) &&
	       pppoe_proto(skb) == htons(PPP_IPV6) &&
	       brnet->filter_pppoe_tagged;
}

/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
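
/* Per-CPU scratch area used across refragmentation: br_nf_dev_queue_xmit()
 * saves the Ethernet and encapsulation headers here before the IP layer
 * fragments the skb, and br_nf_push_frag_xmit() puts them back in front of
 * each fragment.
 */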
struct brnf_frag_data {
	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
	u8 encap_size;
	u8 size;
	u16 vlan_tci;
	__be16 vlan_proto;
};

static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);

static void nf_bridge_info_free(struct sk_buff *skb)
{
	skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
}
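
/* Return the bridge master device that @dev is enslaved to, or NULL if the
 * device is not (or no longer) a bridge port.
 */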
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}

static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	return skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
}
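
/* Length of the encapsulation header (802.1Q or PPPoE session header) that
 * sits between the Ethernet header and the IP header, or 0 for plain IP.
 */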
unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __cpu_to_be16(ETH_P_8021Q):
		return VLAN_HLEN;
	case __cpu_to_be16(ETH_P_PPP_SES):
		return PPPOE_SES_HLEN;
	default:
		return 0;
	}
}
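
/* Strip the VLAN/PPPoE encapsulation so the IP hooks see a plain IP packet;
 * the network header offset is moved along so ip_hdr()/ipv6_hdr() stay
 * valid.  The _rcsum variant also keeps the skb checksum state consistent.
 */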
static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}

/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */

static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = skb_ip_totlen(skb);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	/* We should really parse IP options here but until
	 * somebody who actually uses IP options complains to
	 * us we'll just silently ignore the options because
	 * we're lazy!
	 */
	return 0;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}
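
/* Restore skb->protocol to the original on-wire value (802.1Q or PPPoE)
 * after the IP hooks have run on the decapsulated packet, so the
 * encapsulation header can be pushed back in front of it.
 */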
void nf_bridge_update_protocol(struct sk_buff *skb)
{
	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	switch (nf_bridge->orig_proto) {
	case BRNF_PROTO_8021Q:
		skb->protocol = htons(ETH_P_8021Q);
		break;
	case BRNF_PROTO_PPPOE:
		skb->protocol = htons(ETH_P_PPP_SES);
		break;
	case BRNF_PROTO_UNCHANGED:
		break;
	}
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct neighbour *neigh;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
		int ret;

		if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
		    READ_ONCE(neigh->hh.hh_len)) {
			struct net_device *br_indev;

			br_indev = nf_bridge_get_physindev(skb, net);
			if (!br_indev) {
				neigh_release(neigh);
				goto free_skb;
			}

			neigh_hh_bridge(&neigh->hh, skb);
			skb->dev = br_indev;

			ret = br_handle_frame_finish(net, sk, skb);
		} else {
			/* the neighbour function below overwrites the complete
			 * MAC header, so we save the Ethernet source address and
			 * protocol number.
			 */
			skb_copy_from_linear_data_offset(skb,
							 -(ETH_HLEN-ETH_ALEN),
							 nf_bridge->neigh_header,
							 ETH_HLEN-ETH_ALEN);
			/* tell br_dev_xmit to continue with forwarding */
			nf_bridge->bridged_dnat = 1;
			/* FIXME Need to refragment */
			ret = READ_ONCE(neigh->output)(neigh, skb);
		}
		neigh_release(neigh);
		return ret;
	}
free_skb:
	kfree_skb(skb);
	return 0;
}

static inline bool
br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
			     const struct nf_bridge_info *nf_bridge)
{
	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 * This is also true when SNAT takes place (for the reply direction).
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev, *br_indev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;
	int err;

	br_indev = nf_bridge_get_physindev(skb, net);
	if (!br_indev) {
		kfree_skb(skb);
		return 0;
	}

	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->in_prerouting = 0;
	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(net, iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_drop(skb);
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = br_indev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
						  net, sk, skb, skb->dev,
						  NULL,
						  br_nf_pre_routing_finish_bridge);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(br_indev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_drop(skb);
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = br_indev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
			  br_handle_frame_finish);
	return 0;
}
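
/* Report the logical input device: normally the bridge master, but if the
 * bridge-nf-pass-vlan-input-dev sysctl is enabled and the skb carries a VLAN
 * tag, the bridge's matching VLAN upper device is used instead (when one
 * exists).
 */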
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb,
					       const struct net_device *dev,
					       const struct net *net)
{
	struct net_device *vlan, *br;
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	br = bridge_parent(dev);

	if (brnet->pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
					skb_vlan_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}

/* Some common code for IPv4/IPv6 */
struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge->in_prerouting = 1;
	nf_bridge->physinif = skb->dev->ifindex;
	skb->dev = brnf_get_logical_dev(skb, skb->dev, net);

	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->orig_proto = BRNF_PROTO_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->orig_proto = BRNF_PROTO_PPPOE;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);
	return skb->dev;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular. Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);
	struct brnf_net *brnet;

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(state->in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	brnet = net_generic(state->net, brnf_net_id);
	if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
	    is_pppoe_ipv6(skb, state->net)) {
		if (!brnet->call_ip6tables &&
		    !br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
			return NF_ACCEPT;
		if (!ipv6_mod_enabled()) {
			pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
			return NF_DROP;
		}

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(priv, skb, state);
	}

	if (!brnet->call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES))
		return NF_ACCEPT;

	if (!IS_IP(skb) && !is_vlan_ip(skb, state->net) &&
	    !is_pppoe_ip(skb, state->net))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_validate_ipv4(state->net, skb))
		return NF_DROP;

	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb, state->net))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;

	skb->protocol = htons(ETH_P_IP);
	skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
		skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* conntrack's nf_confirm logic cannot handle cloned skbs referencing
 * the same nf_conn entry, which will happen for multicast (broadcast)
 * frames on bridges.
 *
 *  Example:
 *      macvlan0
 *      br0
 *  ethX  ethY
 *
 *  ethX (or Y) receives multicast or broadcast packet containing
 *  an IP packet, not yet in conntrack table.
 *
 *  1. skb passes through bridge and fake-ip (br_netfilter) PREROUTING.
 *     -> skb->_nfct now references an unconfirmed entry
 *  2. skb is broad/mcast packet. bridge now passes clones out on each bridge
 *     interface.
 *  3. skb gets passed up the stack.
 *  4. In macvlan case, macvlan driver retains clone(s) of the mcast skb
 *     and schedules a work queue to send them out on the lower devices.
 *
 *     The clone skb->_nfct is not a copy, it is the same entry as the
 *     original skb. The macvlan rx handler then returns RX_HANDLER_PASS.
 *  5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb.
 *
 * The macvlan broadcast worker and normal confirm path will race.
 *
 * This race will not happen if step 2 already confirmed a clone. In that
 * case later steps perform skb_clone() with skb->_nfct already confirmed (in
 * hash table). This works fine.
 *
 * But such confirmation won't happen when eb/ip/nftables rules dropped the
 * packets before they reached the nf_confirm step in postrouting.
 *
 * Work around this problem by explicit confirmation of the entry at
 * LOCAL_IN time, before the upper layer has a chance to clone the
 * unconfirmed entry.
 */
static unsigned int br_nf_local_in(void *priv,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
	struct nf_conntrack *nfct = skb_nfct(skb);
	const struct nf_ct_hook *ct_hook;
	struct nf_conn *ct;
	int ret;

	if (promisc) {
		nf_reset_ct(skb);
		return NF_ACCEPT;
	}

	if (!nfct || skb->pkt_type == PACKET_HOST)
		return NF_ACCEPT;

	ct = container_of(nfct, struct nf_conn, ct_general);
	if (likely(nf_ct_is_confirmed(ct)))
		return NF_ACCEPT;

	if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) {
		nf_reset_ct(skb);
		return NF_ACCEPT;
	}

	WARN_ON_ONCE(skb_shared(skb));

	/* We can't call nf_confirm here, it would create a dependency
	 * on nf_conntrack module.
	 */
	ct_hook = rcu_dereference(nf_ct_hook);
	if (!ct_hook) {
		skb->_nfct = 0ul;
		nf_conntrack_put(nfct);
		return NF_ACCEPT;
	}

	nf_bridge_pull_encap_header(skb);
	ret = ct_hook->confirm(skb);
	switch (ret & NF_VERDICT_MASK) {
	case NF_STOLEN:
		return NF_STOLEN;
	default:
		nf_bridge_push_encap_header(skb);
		break;
	}

	ct = container_of(nfct, struct nf_conn, ct_general);
	WARN_ON_ONCE(!nf_ct_is_confirmed(ct));

	return ret;
}
#endif

/* PF_BRIDGE/FORWARD *************************************************/
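/* okfn for the IP/ARP FORWARD hooks on bridged packets: restore the packet
 * type, protocol and encapsulation header, then re-enter the bridge FORWARD
 * hook chain so forwarding can continue via br_forward_finish().
 */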
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *in;

	if (!IS_ARP(skb) && !is_vlan_arp(skb, net)) {

		if (skb->protocol == htons(ETH_P_IP))
			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

		if (skb->protocol == htons(ETH_P_IPV6))
			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;

		in = nf_bridge_get_physindev(skb, net);
		if (!in) {
			kfree_skb(skb);
			return 0;
		}
		if (nf_bridge->pkt_otherhost) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->pkt_otherhost = false;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
			  br_forward_finish);
	return 0;
}


/* This is the 'purely bridged' case. For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(void *priv,
				     struct sk_buff *skb,
				     const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_DROP;

	parent = bridge_parent(state->out);
	if (!parent)
		return NF_DROP;

	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
	    is_pppoe_ip(skb, state->net))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
		 is_pppoe_ipv6(skb, state->net))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	if (pf == NFPROTO_IPV4) {
		if (br_validate_ipv4(state->net, skb))
			return NF_DROP;
		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	if (pf == NFPROTO_IPV6) {
		if (br_validate_ipv6(state->net, skb))
			return NF_DROP;
		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	nf_bridge->physoutdev = skb->dev;
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
		brnf_get_logical_dev(skb, state->in, state->net),
		parent, br_nf_forward_finish);

	return NF_STOLEN;
}

static unsigned int br_nf_forward_arp(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);
	struct brnf_net *brnet;

	p = br_port_get_rcu(state->out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	brnet = net_generic(state->net, brnf_net_id);
	if (!brnet->call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES))
		return NF_ACCEPT;

	if (!IS_ARP(skb)) {
		if (!is_vlan_arp(skb, state->net))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
		return NF_DROP;

	if (arp_hdr(skb)->ar_pln != 4) {
		if (is_vlan_arp(skb, state->net))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = state->in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
		state->in, state->out, br_nf_forward_finish);

	return NF_STOLEN;
}
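
/* Output callback for refragmentation: put the saved Ethernet/VLAN/PPPoE
 * header from the per-CPU scratch area back in front of each fragment before
 * handing it to the bridge xmit path.
 */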
static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	if (data->vlan_proto)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
}

static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		  int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
	struct iphdr *iph = ip_hdr(skb);

	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}
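
/* Number of L2 bytes that the encapsulation takes out of the usable device
 * MTU: PPPOE_SES_HLEN for PPPoE-encapsulated frames, 0 for everything else.
 */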
static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
		return PPPOE_SES_HLEN;
	return 0;
}
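
/* Transmit a packet after POST_ROUTING: restore the encapsulation header and,
 * if the packet is not GSO and exceeds the usable MTU (capped by
 * frag_max_size), save the L2 header into the per-CPU scratch area and
 * refragment via br_nf_push_frag_xmit().
 */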
static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	unsigned int mtu, mtu_reserved;

	mtu_reserved = nf_bridge_mtu_reduction(skb);
	mtu = skb->dev->mtu;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}

	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
		mtu = nf_bridge->frag_max_size;

	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);

	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
		nf_bridge_info_free(skb);
		return br_dev_queue_push_xmit(net, sk, skb);
	}

	/* Fragmentation on metadata/template dst is not supported */
	if (unlikely(!skb_valid_dst(skb)))
		goto drop;

	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
	    skb->protocol == htons(ETH_P_IP)) {
		struct brnf_frag_data *data;

		if (br_validate_ipv4(net, skb))
			goto drop;

		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;

		data = this_cpu_ptr(&brnf_frag_data_storage);

		if (skb_vlan_tag_present(skb)) {
			data->vlan_tci = skb->vlan_tci;
			data->vlan_proto = skb->vlan_proto;
		} else {
			data->vlan_proto = 0;
		}

		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
	}
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
	    skb->protocol == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		struct brnf_frag_data *data;

		if (br_validate_ipv6(net, skb))
			goto drop;

		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;

		data = this_cpu_ptr(&brnf_frag_data_storage);
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		if (v6ops)
			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);

		kfree_skb(skb);
		return -EMSGSIZE;
	}
	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
drop:
	kfree_skb(skb);
	return 0;
}

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(void *priv,
				       struct sk_buff *skb,
				       const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	/* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
	 * on a bridge, but was delivered locally and is now being routed:
	 *
	 * POST_ROUTING was already invoked from the ip stack.
	 */
	if (!nf_bridge || !nf_bridge->physoutdev)
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
	    is_pppoe_ip(skb, state->net))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
		 is_pppoe_ipv6(skb, state->net))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge_pull_encap_header(skb);
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
		NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(void *priv,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (nf_bridge) {
		if (nf_bridge->sabotage_in_done)
			return NF_ACCEPT;

		if (!nf_bridge->in_prerouting &&
		    !netif_is_l3_master(skb->dev) &&
		    !netif_is_l3_slave(skb->dev)) {
			nf_bridge->sabotage_in_done = 1;
			state->okfn(state->net, state->sk, skb);
			return NF_STOLEN;
		}
	}

	return NF_ACCEPT;
}

/* This is called when br_netfilter has called into iptables/netfilter,
 * and DNAT has taken place on a bridge-forwarded packet.
 *
 * neigh->output has created a new MAC header, with local br0 MAC
 * as saddr.
 *
 * This restores the original MAC saddr of the bridged packet
 * before invoking bridge forward logic to transmit the packet.
 */
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *br_indev;

	br_indev = nf_bridge_get_physindev(skb, dev_net(skb->dev));
	if (!br_indev) {
		kfree_skb(skb);
		return;
	}

	skb_pull(skb, ETH_HLEN);
	nf_bridge->bridged_dnat = 0;

	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));

	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
				       nf_bridge->neigh_header,
				       ETH_HLEN - ETH_ALEN);
	skb->dev = br_indev;

	nf_bridge->physoutdev = NULL;
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
}

static int br_nf_dev_xmit(struct sk_buff *skb)
{
	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (nf_bridge && nf_bridge->bridged_dnat) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		return 1;
	}
	return 0;
}

static const struct nf_br_ops br_ops = {
	.br_dev_xmit_hook = br_nf_dev_xmit,
};

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static const struct nf_hook_ops br_nf_ops[] = {
	{
		.hook = br_nf_pre_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	{
		.hook = br_nf_local_in,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_LAST,
	},
#endif
	{
		.hook = br_nf_forward_ip,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};
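
/* Netdevice notifier: registering the first bridge device in a network
 * namespace triggers registration of the br_nf_ops hooks for that namespace.
 */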
static int brnf_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct brnf_net *brnet;
	struct net *net;
	int ret;

	if (event != NETDEV_REGISTER || !netif_is_bridge_master(dev))
		return NOTIFY_DONE;

	ASSERT_RTNL();

	net = dev_net(dev);
	brnet = net_generic(net, brnf_net_id);
	if (brnet->enabled)
		return NOTIFY_OK;

	ret = nf_register_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret)
		return NOTIFY_BAD;

	brnet->enabled = true;
	return NOTIFY_OK;
}

static struct notifier_block brnf_notifier __read_mostly = {
	.notifier_call = brnf_device_event,
};

/* recursively invokes nf_hook_slow (again), skipping already-called
 * hooks (< NF_BR_PRI_BRNF).
 *
 * Called with rcu read lock held.
 */
int br_nf_hook_thresh(unsigned int hook, struct net *net,
		      struct sock *sk, struct sk_buff *skb,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct net *, struct sock *,
				  struct sk_buff *))
{
	const struct nf_hook_entries *e;
	struct nf_hook_state state;
	struct nf_hook_ops **ops;
	unsigned int i;
	int ret;

	e = rcu_dereference(net->nf.hooks_bridge[hook]);
	if (!e)
		return okfn(net, sk, skb);

	ops = nf_hook_entries_get_hook_ops(e);
	for (i = 0; i < e->num_hook_entries; i++) {
		/* These hooks have already been called */
		if (ops[i]->priority < NF_BR_PRI_BRNF)
			continue;

		/* These hooks have not been called yet, run them. */
		if (ops[i]->priority > NF_BR_PRI_BRNF)
			break;

		/* take a closer look at NF_BR_PRI_BRNF. */
		if (ops[i]->hook == br_nf_pre_routing) {
			/* This hook diverted the skb to this function,
			 * hooks after this have not been run yet.
			 */
			i++;
			break;
		}
	}

	nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
			   sk, net, okfn);

	ret = nf_hook_slow(skb, &state, e, i);
	if (ret == 1)
		ret = okfn(net, sk, skb);

	return ret;
}

#ifdef CONFIG_SYSCTL
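/* proc handler shared by all bridge-nf sysctls below: the values behave as
 * booleans, so any non-zero value written is normalised to 1.
 */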
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}
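
/* Per-netns knobs exported under /proc/sys/net/bridge/.  For example, the
 * defaults set by br_netfilter_sysctl_default() correspond to:
 *
 *	sysctl net.bridge.bridge-nf-call-iptables=1
 *	sysctl net.bridge.bridge-nf-filter-vlan-tagged=0
 *
 * The .data pointers are filled in per namespace by
 * br_netfilter_sysctl_init_net() below.
 */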
static struct ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-pass-vlan-input-dev",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};

static inline void br_netfilter_sysctl_default(struct brnf_net *brnf)
{
	brnf->call_iptables = 1;
	brnf->call_ip6tables = 1;
	brnf->call_arptables = 1;
	brnf->filter_vlan_tagged = 0;
	brnf->filter_pppoe_tagged = 0;
	brnf->pass_vlan_indev = 0;
}

static int br_netfilter_sysctl_init_net(struct net *net)
{
	struct ctl_table *table = brnf_table;
	struct brnf_net *brnet;

	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(brnf_table), GFP_KERNEL);
		if (!table)
			return -ENOMEM;
	}

	brnet = net_generic(net, brnf_net_id);
	table[0].data = &brnet->call_arptables;
	table[1].data = &brnet->call_iptables;
	table[2].data = &brnet->call_ip6tables;
	table[3].data = &brnet->filter_vlan_tagged;
	table[4].data = &brnet->filter_pppoe_tagged;
	table[5].data = &brnet->pass_vlan_indev;

	br_netfilter_sysctl_default(brnet);

	brnet->ctl_hdr = register_net_sysctl_sz(net, "net/bridge", table,
						ARRAY_SIZE(brnf_table));
	if (!brnet->ctl_hdr) {
		if (!net_eq(net, &init_net))
			kfree(table);

		return -ENOMEM;
	}

	return 0;
}

static void br_netfilter_sysctl_exit_net(struct net *net,
					 struct brnf_net *brnet)
{
	struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;

	unregister_net_sysctl_table(brnet->ctl_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static int __net_init brnf_init_net(struct net *net)
{
	return br_netfilter_sysctl_init_net(net);
}
#endif

static void __net_exit brnf_exit_net(struct net *net)
{
	struct brnf_net *brnet;

	brnet = net_generic(net, brnf_net_id);
	if (brnet->enabled) {
		nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
		brnet->enabled = false;
	}

#ifdef CONFIG_SYSCTL
	br_netfilter_sysctl_exit_net(net, brnet);
#endif
}

static struct pernet_operations brnf_net_ops __read_mostly = {
#ifdef CONFIG_SYSCTL
	.init = brnf_init_net,
#endif
	.exit = brnf_exit_net,
	.id   = &brnf_net_id,
	.size = sizeof(struct brnf_net),
};

static int __init br_netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&brnf_net_ops);
	if (ret < 0)
		return ret;

	ret = register_netdevice_notifier(&brnf_notifier);
	if (ret < 0) {
		unregister_pernet_subsys(&brnf_net_ops);
		return ret;
	}

	RCU_INIT_POINTER(nf_br_ops, &br_ops);
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}

static void __exit br_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_br_ops, NULL);
	unregister_netdevice_notifier(&brnf_notifier);
	unregister_pernet_subsys(&brnf_net_ops);
}

module_init(br_netfilter_init);
module_exit(br_netfilter_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lennert Buytenhek <buytenh@gnu.org>");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");