/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

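/* A hook blob is a single allocation holding the struct nf_hook_entries
 * header, @num nf_hook_entry slots, @num backing nf_hook_ops pointers,
 * and a trailing nf_hook_entries_rcu_head used to kvfree() the whole
 * blob once an RCU grace period has elapsed.
 */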
static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL);
	if (e)
		e->num_hook_entries = num;
	return e;
}

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

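/* Defer freeing of the blob until readers are done: stash the blob
 * address in the trailing rcu head and kvfree() it from RCU context.
 */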
static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

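/* Placeholder that takes the slot of an unregistered hook until the
 * blob is shrunk; it simply accepts, so traversal moves on.
 */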
static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

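/* Build a new blob with room for one more hook: live (non-dummy)
 * entries are copied over and @reg is inserted so that entries stay
 * sorted by ascending priority. Returns the new blob or an ERR_PTR.
 */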
static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}

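/* Debug-only sanity check: entries in a blob must be sorted by
 * non-decreasing priority.
 */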
static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

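/* Raw variant for callers that manage the blob under their own lock:
 * *pp is read with rcu_dereference_raw(), so concurrent updates must
 * be serialized by the caller.
 */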
int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old: current hook blob at @pp
 * @pp: location of hook blob
 *
 * Hook unregistration must always succeed, so a to-be-removed hook
 * is replaced by a dummy one that just moves on to the next hook.
 *
 * This counts the current dummy hooks, attempts to allocate a new blob,
 * copies the live hooks into it, then publishes the new blob and
 * discards the old one.
 *
 * Returns the address to free, or NULL.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

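/* Map (@pf, @hooknum) to the location of the corresponding hook blob.
 * Most families live in per-netns arrays; NFPROTO_NETDEV hooks (and
 * inet ingress) hang off the net_device instead.
 */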
static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
			return NULL;
		return net->nf.hooks_bridge + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
	case NFPROTO_INET:
		if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
			return NULL;
		if (!dev || dev_net(dev) != net) {
			WARN_ON_ONCE(1);
			return NULL;
		}
		return &dev->nf_hooks_ingress;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
#if IS_ENABLED(CONFIG_DECNET)
	case NFPROTO_DECNET:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum))
			return NULL;
		return net->nf.hooks_decnet + hooknum;
#endif
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (hooknum == NF_NETDEV_EGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_egress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
			    int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == hooknum)
		return -EOPNOTSUPP;
#endif
	if (reg->hooknum != hooknum ||
	    !reg->dev || dev_net(reg->dev) != net)
		return -EINVAL;

	return 0;
}

static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg,
						  int pf)
{
	if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
	    (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
		return true;

	return false;
}

static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg,
						 int pf)
{
	return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS;
}

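/* Maintain the static keys that let the fast path skip hook evaluation
 * entirely. Inet ingress hooks are invoked from the netdev ingress hook
 * point, so they are accounted against the NF_NETDEV_INGRESS key.
 */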
static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}

static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}

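/* Update protocol: a new blob is built under nf_hook_mutex, published
 * with rcu_assign_pointer(), and the old blob is freed once readers in
 * nf_hook_slow() are done with it.
 */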
static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;
	int err;

	switch (pf) {
	case NFPROTO_NETDEV:
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
#ifndef CONFIG_NETFILTER_EGRESS
		if (reg->hooknum == NF_NETDEV_EGRESS)
			return -EOPNOTSUPP;
#endif
		if ((reg->hooknum != NF_NETDEV_INGRESS &&
		     reg->hooknum != NF_NETDEV_EGRESS) ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
		break;
	case NFPROTO_INET:
		if (reg->hooknum != NF_INET_INGRESS)
			break;

		err = nf_ingress_check(net, reg, NF_INET_INGRESS);
		if (err < 0)
			return err;
		break;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks))
		rcu_assign_pointer(*pp, new_hooks);

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_ingress_hook(reg, pf))
		net_inc_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_egress_hook(reg, pf))
		net_inc_egress_queue();
#endif
	nf_static_key_inc(reg, pf);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @old: current hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail because hook unregistration must always succeed,
 * so the to-be-removed hook is replaced with a dummy one.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
		return true;
	}

	return false;
}

static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (nf_ingress_hook(reg, pf))
			net_dec_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
		if (nf_egress_hook(reg, pf))
			net_dec_egress_queue();
#endif
		nf_static_key_dec(reg, pf);
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

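/* NFPROTO_INET hooks other than ingress are registered on both the
 * IPv4 and IPv6 blobs, so unregistration mirrors that fan-out.
 */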
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			__nf_unregister_net_hook(net, NFPROTO_INET, reg);
		} else {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
		}
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			err = __nf_register_net_hook(net, NFPROTO_INET, reg);
			if (err < 0)
				return err;
		} else {
			err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
			if (err < 0)
				return err;

			err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
			if (err < 0) {
				__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
				return err;
			}
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);
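/* Minimal registration sketch (illustrative only; my_hookfn and my_ops
 * are hypothetical names, not part of this file):
 *
 *	static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
 *				      const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_net_hook(&init_net, &my_ops);
 */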

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb(skb);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any other
			 * non conventional verdicts.
			 */
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);
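/* Typical caller pattern, as in the NF_HOOK() wrappers (sketch):
 *
 *	ret = nf_hook_slow(skb, &state, hooks, 0);
 *	if (ret == 1)
 *		ret = okfn(net, sk, skb);
 */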
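/* List variant of nf_hook_slow(): run each skb on @head through the
 * hooks. Accepted skbs are put back on the list; everything else
 * (dropped, queued, stolen) has been consumed by the hooks.
 */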
void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;
	int ret;

	INIT_LIST_HEAD(&sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		ret = nf_hook_slow(skb, state, e, 0);
		if (ret == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without it, a connection may not be in
   the hash table, so manufactured ICMP or RST packets will not be
   associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->_nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	BUG_ON(ct_hook == NULL);
	ct_hook->destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

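/* Per-netns setup: every hook head starts out NULL; the first
 * registration on a given head allocates its blob.
 */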
static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#if IS_ENABLED(CONFIG_DECNET)
	__netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet));
#endif

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry\n");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}