xref: /openbmc/linux/net/netfilter/core.c (revision 5119e438)
/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
	mutex_unlock(&afinfo_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);

#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);
#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

static struct nf_hook_entry *nf_hook_entry_head(struct net *net,
						const struct nf_hook_ops *reg)
{
	struct nf_hook_entry *hook_head = NULL;

	if (reg->pf != NFPROTO_NETDEV)
		hook_head = nf_entry_dereference(net->nf.hooks[reg->pf]
						 [reg->hooknum]);
	else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (reg->dev && dev_net(reg->dev) == net)
			hook_head =
				nf_entry_dereference(reg->dev->nf_hooks_ingress);
#endif
	}
	return hook_head;
}

/* must hold nf_hook_mutex */
static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg,
			      struct nf_hook_entry *entry)
{
	switch (reg->pf) {
	case NFPROTO_NETDEV:
		/* We already checked in nf_register_net_hook() that this is
		 * used from ingress.
		 */
		rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry);
		break;
	default:
		rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum],
				   entry);
		break;
	}
}

int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct nf_hook_entry *hooks_entry;
	struct nf_hook_entry *entry;

	if (reg->pf == NFPROTO_NETDEV &&
	    (reg->hooknum != NF_NETDEV_INGRESS ||
	     !reg->dev || dev_net(reg->dev) != net))
		return -EINVAL;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->orig_ops	= reg;
	entry->ops	= *reg;
	entry->next	= NULL;

	mutex_lock(&nf_hook_mutex);
	hooks_entry = nf_hook_entry_head(net, reg);

	if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) {
		/* This is the case where we need to insert at the head */
		entry->next = hooks_entry;
		hooks_entry = NULL;
	}

	while (hooks_entry &&
	       reg->priority >= hooks_entry->orig_ops->priority &&
	       nf_entry_dereference(hooks_entry->next)) {
		hooks_entry = nf_entry_dereference(hooks_entry->next);
	}

	if (hooks_entry) {
		entry->next = nf_entry_dereference(hooks_entry->next);
		rcu_assign_pointer(hooks_entry->next, entry);
	} else {
		nf_set_hooks_head(net, reg, entry);
	}

	mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);
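
/* Example (illustrative sketch, not part of this file): registering a
 * hook from module code. The names my_hook/my_ops are hypothetical; the
 * hook signature and the nf_hook_ops fields match what this API expects.
 *
 *	static unsigned int my_hook(void *priv, struct sk_buff *skb,
 *				    const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;	// let the packet continue
 *	}
 *
 *	static struct nf_hook_ops my_ops = {
 *		.hook		= my_hook,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_PRE_ROUTING,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_net_hook(&init_net, &my_ops);
 *	...
 *	nf_unregister_net_hook(&init_net, &my_ops);
 *
 * Hooks on the same (pf, hooknum) chain are kept sorted by ascending
 * ->priority, so lower priority values run first.
 */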

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct nf_hook_entry *hooks_entry;

	mutex_lock(&nf_hook_mutex);
	hooks_entry = nf_hook_entry_head(net, reg);
	if (hooks_entry && hooks_entry->orig_ops == reg) {
		nf_set_hooks_head(net, reg,
				  nf_entry_dereference(hooks_entry->next));
		goto unlock;
	}
	while (hooks_entry && nf_entry_dereference(hooks_entry->next)) {
		struct nf_hook_entry *next =
			nf_entry_dereference(hooks_entry->next);
		struct nf_hook_entry *nnext;

		if (next->orig_ops != reg) {
			hooks_entry = next;
			continue;
		}
		nnext = nf_entry_dereference(next->next);
		rcu_assign_pointer(hooks_entry->next, nnext);
		hooks_entry = next;
		break;
	}

unlock:
	mutex_unlock(&nf_hook_mutex);
	if (!hooks_entry) {
		WARN(1, "nf_unregister_net_hook: hook not found!\n");
		return;
	}
#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	synchronize_net();
	nf_queue_nf_hook_drop(net, hooks_entry);
	/* another CPU might still be processing an nfqueue verdict that
	 * used reg
	 */
	synchronize_net();
	kfree(hooks_entry);
}
EXPORT_SYMBOL(nf_unregister_net_hook);

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n)
{
	while (n-- > 0)
		nf_unregister_net_hook(net, &reg[n]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);
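
/* Example (sketch): registering several hooks at once. On failure,
 * nf_register_net_hooks() unwinds the entries it already registered, so
 * the caller only checks the return value. my_ops[] is hypothetical.
 *
 *	static struct nf_hook_ops my_ops[] = {
 *		{
 *			.hook		= my_hook_in,
 *			.pf		= NFPROTO_IPV4,
 *			.hooknum	= NF_INET_LOCAL_IN,
 *			.priority	= NF_IP_PRI_FILTER,
 *		},
 *		{
 *			.hook		= my_hook_out,
 *			.pf		= NFPROTO_IPV4,
 *			.hooknum	= NF_INET_LOCAL_OUT,
 *			.priority	= NF_IP_PRI_FILTER,
 *		},
 *	};
 *
 *	err = nf_register_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));
 *	...
 *	nf_unregister_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));
 */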

static LIST_HEAD(nf_hook_list);

static int _nf_register_hook(struct nf_hook_ops *reg)
{
	struct net *net, *last;
	int ret;

	for_each_net(net) {
		ret = nf_register_net_hook(net, reg);
		if (ret && ret != -ENOENT)
			goto rollback;
	}
	list_add_tail(&reg->list, &nf_hook_list);

	return 0;
rollback:
	last = net;
	for_each_net(net) {
		if (net == last)
			break;
		nf_unregister_net_hook(net, reg);
	}
	return ret;
}

int nf_register_hook(struct nf_hook_ops *reg)
{
	int ret;

	rtnl_lock();
	ret = _nf_register_hook(reg);
	rtnl_unlock();

	return ret;
}
EXPORT_SYMBOL(nf_register_hook);

static void _nf_unregister_hook(struct nf_hook_ops *reg)
{
	struct net *net;

	list_del(&reg->list);
	for_each_net(net)
		nf_unregister_net_hook(net, reg);
}

void nf_unregister_hook(struct nf_hook_ops *reg)
{
	rtnl_lock();
	_nf_unregister_hook(reg);
	rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);
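
/* Example (sketch): the older, non-namespaced API. nf_register_hook()
 * takes rtnl_lock itself and registers the ops in every current network
 * namespace; namespaces created later pick the ops up from nf_hook_list
 * via nf_register_hook_list() below. The leading-underscore variants are
 * for callers that already hold rtnl_lock.
 *
 *	err = nf_register_hook(&my_ops);	// my_ops as sketched above
 *	...
 *	nf_unregister_hook(&my_ops);
 */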

int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_hook(&reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_hooks(reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_hooks);

/* Caller MUST take rtnl_lock() */
int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = _nf_register_hook(&reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		_nf_unregister_hooks(reg, i);
	return err;
}
EXPORT_SYMBOL(_nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	while (n-- > 0)
		nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(nf_unregister_hooks);

/* Caller MUST take rtnl_lock() */
void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	while (n-- > 0)
		_nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(_nf_unregister_hooks);

unsigned int nf_iterate(struct sk_buff *skb,
			struct nf_hook_state *state,
			struct nf_hook_entry **entryp)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this function,
	 * because of the risk of continuing from a deleted element.
	 */
	while (*entryp) {
		if (state->thresh > (*entryp)->ops.priority) {
			*entryp = rcu_dereference((*entryp)->next);
			continue;
		}

		/* Optimization: we don't need to hold a module reference
		 * here, since the function can't sleep. --RR
		 */
repeat:
		verdict = (*entryp)->ops.hook((*entryp)->ops.priv, skb, state);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK) >
				     NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					(*entryp)->ops.hook, state->hook);
				*entryp = rcu_dereference((*entryp)->next);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		*entryp = rcu_dereference((*entryp)->next);
	}
	return NF_ACCEPT;
}
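
/* For reference, a sketch of how nf_iterate() and nf_hook_slow() treat
 * each verdict (values from uapi/linux/netfilter.h):
 *
 *	NF_DROP   (0)	stop traversal; nf_hook_slow() frees the skb
 *	NF_ACCEPT (1)	continue with the next hook entry
 *	NF_STOLEN (2)	stop traversal; the hook took ownership of the skb
 *	NF_QUEUE  (3)	stop traversal; hand the skb to nf_queue()
 *	NF_REPEAT (4)	invoke the same hook function again
 *	NF_STOP   (5)	stop traversal, but treat the skb as accepted
 */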


/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock.
 */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
	struct nf_hook_entry *entry;
	unsigned int verdict;
	int ret = 0;

	entry = rcu_dereference(state->hook_entries);
next_hook:
	verdict = nf_iterate(skb, state, &entry);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
		kfree_skb(skb);
		ret = NF_DROP_GETERR(verdict);
		if (ret == 0)
			ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		int err;

		RCU_INIT_POINTER(state->hook_entries, entry);
		err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ESRCH &&
			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
	}
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
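
/* Example (sketch): how callers consume nf_hook_slow()'s return value.
 * The NF_HOOK() wrapper in <linux/netfilter.h> follows roughly this
 * pattern:
 *
 *	ret = nf_hook(pf, hook, net, sk, skb, indev, outdev, okfn);
 *	if (ret == 1)
 *		ret = okfn(net, sk, skb);	// every hook said NF_ACCEPT
 *	return ret;
 */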


int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
	if (writable_len > skb->len)
		return 0;

	/* Not exclusive use of packet?  Must copy. */
	if (!skb_cloned(skb)) {
		if (writable_len <= skb_headlen(skb))
			return 1;
	} else if (skb_clone_writable(skb, writable_len))
		return 1;

	if (writable_len <= skb_headlen(skb))
		writable_len = 0;
	else
		writable_len -= skb_headlen(skb);

	return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);
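
/* Example (sketch): a hook about to rewrite the IPv4 header guards the
 * mangling like this (a pattern used by various xtables targets):
 *
 *	if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *		return NF_DROP;
 *	iph = ip_hdr(skb);	// private, linear copy; safe to modify
 */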

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
 * connection tracking is in use: without this, the connection may not be
 * in the hash table, and hence manufactured ICMP or RST packets will not
 * be associated with it.
 */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);
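
/* Example (sketch): the REJECT infrastructure uses this to tie a
 * manufactured TCP RST to the conntrack entry of the packet being
 * rejected, roughly:
 *
 *	nf_ct_attach(nskb, oldskb);
 */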

void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	void (*destroy)(struct nf_conntrack *);

	rcu_read_lock();
	destroy = rcu_dereference(nf_ct_destroy);
	BUG_ON(destroy == NULL);
	destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_NF_NAT_NEEDED
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif

static int nf_register_hook_list(struct net *net)
{
	struct nf_hook_ops *elem;
	int ret;

	rtnl_lock();
	list_for_each_entry(elem, &nf_hook_list, list) {
		ret = nf_register_net_hook(net, elem);
		if (ret && ret != -ENOENT)
			goto out_undo;
	}
	rtnl_unlock();
	return 0;

out_undo:
	list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
		nf_unregister_net_hook(net, elem);
	rtnl_unlock();
	return ret;
}

static void nf_unregister_hook_list(struct net *net)
{
	struct nf_hook_ops *elem;

	rtnl_lock();
	list_for_each_entry(elem, &nf_hook_list, list)
		nf_unregister_net_hook(net, elem);
	rtnl_unlock();
}

static int __net_init netfilter_net_init(struct net *net)
{
	int i, h, ret;

	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
	}

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry\n");

		return -ENOMEM;
	}
#endif
	ret = nf_register_hook_list(net);
	if (ret)
		remove_proc_entry("netfilter", net->proc_net);

	return ret;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	nf_unregister_hook_list(net);
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}
578