/* xref: /openbmc/linux/net/netfilter/core.c (revision 9709cf5b) */
/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL_ACCOUNT);
	if (e)
		e->num_hook_entries = num;
	return e;
}
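
/*
 * Layout of the single allocation made above, as implied by the sizes
 * summed in allocate_hook_entries_size() and by the pointer arithmetic
 * in nf_hook_entries_free() below (the helper returning the ops array
 * lives in <linux/netfilter.h>):
 *
 *	struct nf_hook_entries            header (num_hook_entries)
 *	struct nf_hook_entry hooks[num]   hot-path array (hookfn + priv)
 *	struct nf_hook_ops *ops[num]      bookkeeping, reached via
 *	                                  nf_hook_entries_get_hook_ops()
 *	struct nf_hook_entries_rcu_head   rcu_head + allocation pointer used
 *	                                  to kvfree() the blob after a grace
 *	                                  period
 */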

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}
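
/*
 * Insertion order produced by nf_hook_entries_grow(), illustrated: growing
 * a chain that already holds hooks with priorities -100 and 100 by a new
 * hook of priority 0 yields { -100, 0, 100 }. Hooks run in ascending
 * priority order, and a new hook with the same priority as an existing one
 * is placed in front of it.
 */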

static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old -- current hook blob at @pp
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy hook that simply moves on to the next hook.
 *
 * This counts the current dummy hooks, attempts to allocate a new blob,
 * copies the live hooks into it, then publishes the new blob (or NULL,
 * if every hook has been removed) at @pp.
 *
 * Returns the address of the old blob for the caller to free, or NULL
 * if nothing was replaced.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
			return NULL;
		return net->nf.hooks_bridge + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
	case NFPROTO_INET:
		if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
			return NULL;
		if (!dev || dev_net(dev) != net) {
			WARN_ON_ONCE(1);
			return NULL;
		}
		return &dev->nf_hooks_ingress;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (hooknum == NF_NETDEV_EGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_egress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
			    int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == hooknum)
		return -EOPNOTSUPP;
#endif
	if (reg->hooknum != hooknum ||
	    !reg->dev || dev_net(reg->dev) != net)
		return -EINVAL;

	return 0;
}

static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg,
						  int pf)
{
	if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
	    (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
		return true;

	return false;
}

static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg,
						 int pf)
{
	return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS;
}

static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}

static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}
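
/*
 * The static keys adjusted above gate the packet fast path. The inline
 * nf_hook() helper in <linux/netfilter.h> consumes them roughly as follows
 * (paraphrased sketch, not a verbatim copy of that header):
 *
 *	#ifdef CONFIG_JUMP_LABEL
 *		if (__builtin_constant_p(pf) && __builtin_constant_p(hook) &&
 *		    !static_key_false(&nf_hooks_needed[pf][hook]))
 *			return 1;	// no hooks registered, skip the chain
 *	#endif
 *
 * so a family/hooknum pair with no registered hooks costs only a
 * patched-out branch per packet.
 */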

static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;
	int err;

	switch (pf) {
	case NFPROTO_NETDEV:
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
#ifndef CONFIG_NETFILTER_EGRESS
		if (reg->hooknum == NF_NETDEV_EGRESS)
			return -EOPNOTSUPP;
#endif
		if ((reg->hooknum != NF_NETDEV_INGRESS &&
		     reg->hooknum != NF_NETDEV_EGRESS) ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
		break;
	case NFPROTO_INET:
		if (reg->hooknum != NF_INET_INGRESS)
			break;

		err = nf_ingress_check(net, reg, NF_INET_INGRESS);
		if (err < 0)
			return err;
		break;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks)) {
		hooks_validate(new_hooks);
		rcu_assign_pointer(*pp, new_hooks);
	}

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_ingress_hook(reg, pf))
		net_inc_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_egress_hook(reg, pf))
		net_inc_egress_queue();
#endif
	nf_static_key_inc(reg, pf);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @old: current hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail; hook unregistration must always succeed.
 * Therefore the to-be-removed hook is replaced with a dummy hook.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
		return true;
	}

	return false;
}

static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (nf_ingress_hook(reg, pf))
			net_dec_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
		if (nf_egress_hook(reg, pf))
			net_dec_egress_queue();
#endif
		nf_static_key_dec(reg, pf);
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			__nf_unregister_net_hook(net, NFPROTO_INET, reg);
		} else {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
		}
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			err = __nf_register_net_hook(net, NFPROTO_INET, reg);
			if (err < 0)
				return err;
		} else {
			err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
			if (err < 0)
				return err;

			err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
			if (err < 0) {
				__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
				return err;
			}
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);
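
/*
 * Minimal usage sketch (the my_* names are hypothetical, not part of this
 * file): a module hooks IPv4 LOCAL_IN and accepts everything. Note that an
 * NFPROTO_INET registration other than NF_INET_INGRESS is expanded by the
 * function above into separate IPv4 and IPv6 registrations.
 *
 *	static unsigned int my_hook(void *priv, struct sk_buff *skb,
 *				    const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hook,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_net_hook(&init_net, &my_ops);
 *	...
 *	nf_unregister_net_hook(&init_net, &my_ops);
 */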

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);
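
/*
 * Modules installing several hooks usually keep them in one array and use
 * the helpers above; nf_register_net_hooks() unwinds a partial registration
 * on failure. Sketch (the my_* names are hypothetical):
 *
 *	static const struct nf_hook_ops my_hooks[] = {
 *		{ .hook = my_in,  .pf = NFPROTO_IPV4,
 *		  .hooknum = NF_INET_LOCAL_IN,  .priority = NF_IP_PRI_FILTER },
 *		{ .hook = my_out, .pf = NFPROTO_IPV4,
 *		  .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_FILTER },
 *	};
 *
 *	err = nf_register_net_hooks(net, my_hooks, ARRAY_SIZE(my_hooks));
 *	...
 *	nf_unregister_net_hooks(net, my_hooks, ARRAY_SIZE(my_hooks));
 */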

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb_reason(skb,
					 SKB_DROP_REASON_NETFILTER_DROP);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any other
			 * non conventional verdicts.
			 */
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);
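
/*
 * How the return value of nf_hook_slow() is consumed: the NF_HOOK() wrapper
 * in <linux/netfilter.h> behaves roughly like this (paraphrased sketch):
 *
 *	ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
 *	if (ret == 1)
 *		ret = okfn(net, sk, skb);	// every hook said NF_ACCEPT
 *	return ret;				// 0: stolen/queued, <0: dropped
 *
 * Only a fully accepted packet reaches okfn(); a dropped packet has already
 * been freed by the loop above.
 */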

void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;
	int ret;

	INIT_LIST_HEAD(&sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		ret = nf_hook_slow(skb, state, e, 0);
		if (ret == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
u8 nf_ctnetlink_has_listener;
EXPORT_SYMBOL_GPL(nf_ctnetlink_has_listener);

const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

/* This does not belong here, but locally generated errors need it if connection
 * tracking is in use: without this, the connection may not be in the hash table,
 * and hence manufactured ICMP or RST packets will not be associated with it.
 */
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;

	if (skb->_nfct) {
		rcu_read_lock();
		ct_hook = rcu_dereference(nf_ct_hook);
		if (ct_hook)
			ct_hook->attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	const struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	BUG_ON(ct_hook == NULL);
	ct_hook->destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

void nf_ct_set_closing(struct nf_conntrack *nfct)
{
	const struct nf_ct_hook *ct_hook;

	if (!nfct)
		return;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ct_hook->set_closing(nfct);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_set_closing);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry\n");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}