xref: /openbmc/linux/net/ipv4/netfilter/ip_tables.c (revision 6b5fc336)
1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/cache.h>
14 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/icmp.h>
21 #include <net/ip.h>
22 #include <net/compat.h>
23 #include <linux/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28 
29 #include <linux/netfilter/x_tables.h>
30 #include <linux/netfilter_ipv4/ip_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33 
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv4 packet filter");
37 
38 #ifdef CONFIG_NETFILTER_DEBUG
39 #define IP_NF_ASSERT(x)		WARN_ON(!(x))
40 #else
41 #define IP_NF_ASSERT(x)
42 #endif
43 
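/*
 * xt_alloc_initial_table() is a macro from xt_repldata.h (included above):
 * the (ipt, IPT) arguments are token-pasted to build a default ruleset with
 * one unconditional standard entry per valid hook plus a terminating error
 * entry.
 */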
44 void *ipt_alloc_initial_table(const struct xt_table *info)
45 {
46 	return xt_alloc_initial_table(ipt, IPT);
47 }
48 EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
49 
50 /* Returns whether the packet matches the rule or not. */
51 /* Performance critical - called for every packet */
52 static inline bool
53 ip_packet_match(const struct iphdr *ip,
54 		const char *indev,
55 		const char *outdev,
56 		const struct ipt_ip *ipinfo,
57 		int isfrag)
58 {
59 	unsigned long ret;
60 
61 	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
62 		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
63 	    NF_INVF(ipinfo, IPT_INV_DSTIP,
64 		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
65 		return false;
66 
67 	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
68 
69 	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
70 		return false;
71 
72 	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
73 
74 	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
75 		return false;
76 
77 	/* Check specific protocol */
78 	if (ipinfo->proto &&
79 	    NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
80 		return false;
81 
82 	/* If we have a fragment rule but the packet is not a fragment
83 	 * then we return false */
84 	if (NF_INVF(ipinfo, IPT_INV_FRAG,
85 		    (ipinfo->flags & IPT_F_FRAG) && !isfrag))
86 		return false;
87 
88 	return true;
89 }
90 
91 static bool
92 ip_checkentry(const struct ipt_ip *ip)
93 {
94 	if (ip->flags & ~IPT_F_MASK)
95 		return false;
96 	if (ip->invflags & ~IPT_INV_MASK)
97 		return false;
98 	return true;
99 }
100 
101 static unsigned int
102 ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
103 {
104 	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
105 
106 	return NF_DROP;
107 }
108 
109 /* Performance critical */
110 static inline struct ipt_entry *
111 get_entry(const void *base, unsigned int offset)
112 {
113 	return (struct ipt_entry *)(base + offset);
114 }
115 
116 /* All zeroes == unconditional rule. */
117 /* Mildly perf critical (only if packet tracing is on) */
118 static inline bool unconditional(const struct ipt_entry *e)
119 {
120 	static const struct ipt_ip uncond;
121 
122 	return e->target_offset == sizeof(struct ipt_entry) &&
123 	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
124 }
125 
126 /* for const-correctness */
127 static inline const struct xt_entry_target *
128 ipt_get_target_c(const struct ipt_entry *e)
129 {
130 	return ipt_get_target((struct ipt_entry *)e);
131 }
132 
133 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
134 static const char *const hooknames[] = {
135 	[NF_INET_PRE_ROUTING]		= "PREROUTING",
136 	[NF_INET_LOCAL_IN]		= "INPUT",
137 	[NF_INET_FORWARD]		= "FORWARD",
138 	[NF_INET_LOCAL_OUT]		= "OUTPUT",
139 	[NF_INET_POST_ROUTING]		= "POSTROUTING",
140 };
141 
142 enum nf_ip_trace_comments {
143 	NF_IP_TRACE_COMMENT_RULE,
144 	NF_IP_TRACE_COMMENT_RETURN,
145 	NF_IP_TRACE_COMMENT_POLICY,
146 };
147 
148 static const char *const comments[] = {
149 	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
150 	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
151 	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
152 };
153 
154 static struct nf_loginfo trace_loginfo = {
155 	.type = NF_LOG_TYPE_LOG,
156 	.u = {
157 		.log = {
158 			.level = 4,
159 			.logflags = NF_LOG_DEFAULT_MASK,
160 		},
161 	},
162 };
163 
164 /* Mildly perf critical (only if packet tracing is on) */
165 static inline int
166 get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
167 		      const char *hookname, const char **chainname,
168 		      const char **comment, unsigned int *rulenum)
169 {
170 	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);
171 
172 	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
173 		/* Head of user chain: ERROR target with chainname */
174 		*chainname = t->target.data;
175 		(*rulenum) = 0;
176 	} else if (s == e) {
177 		(*rulenum)++;
178 
179 		if (unconditional(s) &&
180 		    strcmp(t->target.u.kernel.target->name,
181 			   XT_STANDARD_TARGET) == 0 &&
182 		   t->verdict < 0) {
183 			/* Tail of chains: STANDARD target (return/policy) */
184 			*comment = *chainname == hookname
185 				? comments[NF_IP_TRACE_COMMENT_POLICY]
186 				: comments[NF_IP_TRACE_COMMENT_RETURN];
187 		}
188 		return 1;
189 	} else
190 		(*rulenum)++;
191 
192 	return 0;
193 }
194 
195 static void trace_packet(struct net *net,
196 			 const struct sk_buff *skb,
197 			 unsigned int hook,
198 			 const struct net_device *in,
199 			 const struct net_device *out,
200 			 const char *tablename,
201 			 const struct xt_table_info *private,
202 			 const struct ipt_entry *e)
203 {
204 	const struct ipt_entry *root;
205 	const char *hookname, *chainname, *comment;
206 	const struct ipt_entry *iter;
207 	unsigned int rulenum = 0;
208 
209 	root = get_entry(private->entries, private->hook_entry[hook]);
210 
211 	hookname = chainname = hooknames[hook];
212 	comment = comments[NF_IP_TRACE_COMMENT_RULE];
213 
214 	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
215 		if (get_chainname_rulenum(iter, e, hookname,
216 		    &chainname, &comment, &rulenum) != 0)
217 			break;
218 
219 	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
220 		     "TRACE: %s:%s:%s:%u ",
221 		     tablename, chainname, comment, rulenum);
222 }
223 #endif
224 
225 static inline
226 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
227 {
228 	return (void *)entry + entry->next_offset;
229 }
230 
231 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
232 unsigned int
233 ipt_do_table(struct sk_buff *skb,
234 	     const struct nf_hook_state *state,
235 	     struct xt_table *table)
236 {
237 	unsigned int hook = state->hook;
238 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
239 	const struct iphdr *ip;
240 	/* Initializing verdict to NF_DROP keeps gcc happy. */
241 	unsigned int verdict = NF_DROP;
242 	const char *indev, *outdev;
243 	const void *table_base;
244 	struct ipt_entry *e, **jumpstack;
245 	unsigned int stackidx, cpu;
246 	const struct xt_table_info *private;
247 	struct xt_action_param acpar;
248 	unsigned int addend;
249 
250 	/* Initialization */
251 	stackidx = 0;
252 	ip = ip_hdr(skb);
253 	indev = state->in ? state->in->name : nulldevname;
254 	outdev = state->out ? state->out->name : nulldevname;
255 	/* We handle fragments by dealing with the first fragment as
256 	 * if it was a normal packet.  All other fragments are treated
257 	 * normally, except that they will NEVER match rules that ask
258 	 * things we don't know, i.e. tcp syn flag or ports.  If the
259 	 * rule is also a fragment-specific rule, non-fragments won't
260 	 * match it. */
261 	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
262 	acpar.thoff   = ip_hdrlen(skb);
263 	acpar.hotdrop = false;
264 	acpar.state   = state;
265 
266 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
267 	local_bh_disable();
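	/* Enter this CPU's xt_recseq write section; counter readers
	 * (get_counters()) and table replacement use this seqcount to detect
	 * packets still traversing the table. */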
268 	addend = xt_write_recseq_begin();
269 	private = table->private;
270 	cpu        = smp_processor_id();
271 	/*
272 	 * Ensure we load private-> members after we've fetched the base
273 	 * pointer.
274 	 */
275 	smp_read_barrier_depends();
276 	table_base = private->entries;
277 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
278 
279 	/* Switch to alternate jumpstack if we're being invoked via TEE.
280 	 * TEE issues XT_CONTINUE verdict on original skb so we must not
281 	 * clobber the jumpstack.
282 	 *
283 	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
284 	 * but that is no problem since these targets issue an absolute verdict.
285 	 */
286 	if (static_key_false(&xt_tee_enabled))
287 		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
288 
289 	e = get_entry(table_base, private->hook_entry[hook]);
290 
291 	do {
292 		const struct xt_entry_target *t;
293 		const struct xt_entry_match *ematch;
294 		struct xt_counters *counter;
295 
296 		IP_NF_ASSERT(e);
297 		if (!ip_packet_match(ip, indev, outdev,
298 		    &e->ip, acpar.fragoff)) {
299  no_match:
300 			e = ipt_next_entry(e);
301 			continue;
302 		}
303 
304 		xt_ematch_foreach(ematch, e) {
305 			acpar.match     = ematch->u.kernel.match;
306 			acpar.matchinfo = ematch->data;
307 			if (!acpar.match->match(skb, &acpar))
308 				goto no_match;
309 		}
310 
311 		counter = xt_get_this_cpu_counter(&e->counters);
312 		ADD_COUNTER(*counter, skb->len, 1);
313 
314 		t = ipt_get_target(e);
315 		IP_NF_ASSERT(t->u.kernel.target);
316 
317 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
318 		/* The packet is traced: log it */
319 		if (unlikely(skb->nf_trace))
320 			trace_packet(state->net, skb, hook, state->in,
321 				     state->out, table->name, private, e);
322 #endif
323 		/* Standard target? */
324 		if (!t->u.kernel.target->target) {
325 			int v;
326 
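			/* A negative verdict is either XT_RETURN or an absolute
			 * verdict encoded as -verdict - 1 (e.g. -NF_ACCEPT - 1);
			 * a non-negative verdict is a jump offset into the table. */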
327 			v = ((struct xt_standard_target *)t)->verdict;
328 			if (v < 0) {
329 				/* Pop from stack? */
330 				if (v != XT_RETURN) {
331 					verdict = (unsigned int)(-v) - 1;
332 					break;
333 				}
334 				if (stackidx == 0) {
335 					e = get_entry(table_base,
336 					    private->underflow[hook]);
337 				} else {
338 					e = jumpstack[--stackidx];
339 					e = ipt_next_entry(e);
340 				}
341 				continue;
342 			}
343 			if (table_base + v != ipt_next_entry(e) &&
344 			    !(e->ip.flags & IPT_F_GOTO))
345 				jumpstack[stackidx++] = e;
346 
347 			e = get_entry(table_base, v);
348 			continue;
349 		}
350 
351 		acpar.target   = t->u.kernel.target;
352 		acpar.targinfo = t->data;
353 
354 		verdict = t->u.kernel.target->target(skb, &acpar);
355 		/* Target might have changed stuff. */
356 		ip = ip_hdr(skb);
357 		if (verdict == XT_CONTINUE)
358 			e = ipt_next_entry(e);
359 		else
360 			/* Verdict */
361 			break;
362 	} while (!acpar.hotdrop);
363 
364 	xt_write_recseq_end(addend);
365 	local_bh_enable();
366 
367 	if (acpar.hotdrop)
368 		return NF_DROP;
369 	else return verdict;
370 }
371 
372 /* Figures out from what hook each rule can be called: returns 0 if
373    there are loops.  Puts hook bitmask in comefrom. */
374 static int
375 mark_source_chains(const struct xt_table_info *newinfo,
376 		   unsigned int valid_hooks, void *entry0,
377 		   unsigned int *offsets)
378 {
379 	unsigned int hook;
380 
381 	/* No recursion; use packet counter to save back ptrs (reset
382 	   to 0 as we leave), and comefrom to save source hook bitmask */
383 	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
384 		unsigned int pos = newinfo->hook_entry[hook];
385 		struct ipt_entry *e = entry0 + pos;
386 
387 		if (!(valid_hooks & (1 << hook)))
388 			continue;
389 
390 		/* Set initial back pointer. */
391 		e->counters.pcnt = pos;
392 
393 		for (;;) {
394 			const struct xt_standard_target *t
395 				= (void *)ipt_get_target_c(e);
396 			int visited = e->comefrom & (1 << hook);
397 
398 			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
399 				return 0;
400 
401 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
402 
403 			/* Unconditional return/END. */
404 			if ((unconditional(e) &&
405 			     (strcmp(t->target.u.user.name,
406 				     XT_STANDARD_TARGET) == 0) &&
407 			     t->verdict < 0) || visited) {
408 				unsigned int oldpos, size;
409 
410 				if ((strcmp(t->target.u.user.name,
411 					    XT_STANDARD_TARGET) == 0) &&
412 				    t->verdict < -NF_MAX_VERDICT - 1)
413 					return 0;
414 
415 				/* Return: backtrack through the last
416 				   big jump. */
417 				do {
418 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
419 					oldpos = pos;
420 					pos = e->counters.pcnt;
421 					e->counters.pcnt = 0;
422 
423 					/* We're at the start. */
424 					if (pos == oldpos)
425 						goto next;
426 
427 					e = entry0 + pos;
428 				} while (oldpos == pos + e->next_offset);
429 
430 				/* Move along one */
431 				size = e->next_offset;
432 				e = entry0 + pos + size;
433 				if (pos + size >= newinfo->size)
434 					return 0;
435 				e->counters.pcnt = pos;
436 				pos += size;
437 			} else {
438 				int newpos = t->verdict;
439 
440 				if (strcmp(t->target.u.user.name,
441 					   XT_STANDARD_TARGET) == 0 &&
442 				    newpos >= 0) {
443 					/* This is a jump; chase it. */
444 					if (!xt_find_jump_offset(offsets, newpos,
445 								 newinfo->number))
446 						return 0;
447 					e = entry0 + newpos;
448 				} else {
449 					/* ... this is a fallthru */
450 					newpos = pos + e->next_offset;
451 					if (newpos >= newinfo->size)
452 						return 0;
453 				}
454 				e = entry0 + newpos;
455 				e->counters.pcnt = pos;
456 				pos = newpos;
457 			}
458 		}
459 next:		;
460 	}
461 	return 1;
462 }
463 
464 static void cleanup_match(struct xt_entry_match *m, struct net *net)
465 {
466 	struct xt_mtdtor_param par;
467 
468 	par.net       = net;
469 	par.match     = m->u.kernel.match;
470 	par.matchinfo = m->data;
471 	par.family    = NFPROTO_IPV4;
472 	if (par.match->destroy != NULL)
473 		par.match->destroy(&par);
474 	module_put(par.match->me);
475 }
476 
477 static int
478 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
479 {
480 	const struct ipt_ip *ip = par->entryinfo;
481 
482 	par->match     = m->u.kernel.match;
483 	par->matchinfo = m->data;
484 
485 	return xt_check_match(par, m->u.match_size - sizeof(*m),
486 			      ip->proto, ip->invflags & IPT_INV_PROTO);
487 }
488 
489 static int
490 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
491 {
492 	struct xt_match *match;
493 	int ret;
494 
495 	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
496 				      m->u.user.revision);
497 	if (IS_ERR(match))
498 		return PTR_ERR(match);
499 	m->u.kernel.match = match;
500 
501 	ret = check_match(m, par);
502 	if (ret)
503 		goto err;
504 
505 	return 0;
506 err:
507 	module_put(m->u.kernel.match->me);
508 	return ret;
509 }
510 
511 static int check_target(struct ipt_entry *e, struct net *net, const char *name)
512 {
513 	struct xt_entry_target *t = ipt_get_target(e);
514 	struct xt_tgchk_param par = {
515 		.net       = net,
516 		.table     = name,
517 		.entryinfo = e,
518 		.target    = t->u.kernel.target,
519 		.targinfo  = t->data,
520 		.hook_mask = e->comefrom,
521 		.family    = NFPROTO_IPV4,
522 	};
523 
524 	return xt_check_target(&par, t->u.target_size - sizeof(*t),
525 			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
526 }
527 
528 static int
529 find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
530 		 unsigned int size,
531 		 struct xt_percpu_counter_alloc_state *alloc_state)
532 {
533 	struct xt_entry_target *t;
534 	struct xt_target *target;
535 	int ret;
536 	unsigned int j;
537 	struct xt_mtchk_param mtpar;
538 	struct xt_entry_match *ematch;
539 
540 	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
541 		return -ENOMEM;
542 
543 	j = 0;
544 	mtpar.net	= net;
545 	mtpar.table     = name;
546 	mtpar.entryinfo = &e->ip;
547 	mtpar.hook_mask = e->comefrom;
548 	mtpar.family    = NFPROTO_IPV4;
549 	xt_ematch_foreach(ematch, e) {
550 		ret = find_check_match(ematch, &mtpar);
551 		if (ret != 0)
552 			goto cleanup_matches;
553 		++j;
554 	}
555 
556 	t = ipt_get_target(e);
557 	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
558 					t->u.user.revision);
559 	if (IS_ERR(target)) {
560 		ret = PTR_ERR(target);
561 		goto cleanup_matches;
562 	}
563 	t->u.kernel.target = target;
564 
565 	ret = check_target(e, net, name);
566 	if (ret)
567 		goto err;
568 
569 	return 0;
570  err:
571 	module_put(t->u.kernel.target->me);
572  cleanup_matches:
573 	xt_ematch_foreach(ematch, e) {
574 		if (j-- == 0)
575 			break;
576 		cleanup_match(ematch, net);
577 	}
578 
579 	xt_percpu_counter_free(&e->counters);
580 
581 	return ret;
582 }
583 
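/* An underflow entry (the built-in chain policy) must be an unconditional
 * standard target whose verdict decodes to NF_DROP or NF_ACCEPT. */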
584 static bool check_underflow(const struct ipt_entry *e)
585 {
586 	const struct xt_entry_target *t;
587 	unsigned int verdict;
588 
589 	if (!unconditional(e))
590 		return false;
591 	t = ipt_get_target_c(e);
592 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
593 		return false;
594 	verdict = ((struct xt_standard_target *)t)->verdict;
595 	verdict = -verdict - 1;
596 	return verdict == NF_DROP || verdict == NF_ACCEPT;
597 }
598 
599 static int
600 check_entry_size_and_hooks(struct ipt_entry *e,
601 			   struct xt_table_info *newinfo,
602 			   const unsigned char *base,
603 			   const unsigned char *limit,
604 			   const unsigned int *hook_entries,
605 			   const unsigned int *underflows,
606 			   unsigned int valid_hooks)
607 {
608 	unsigned int h;
609 	int err;
610 
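	/* Reject misaligned entries and entries whose declared sizes would run
	 * past the end of the user-supplied blob. */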
611 	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
612 	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
613 	    (unsigned char *)e + e->next_offset > limit)
614 		return -EINVAL;
615 
616 	if (e->next_offset
617 	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
618 		return -EINVAL;
619 
620 	if (!ip_checkentry(&e->ip))
621 		return -EINVAL;
622 
623 	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
624 				     e->next_offset);
625 	if (err)
626 		return err;
627 
628 	/* Check hooks & underflows */
629 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
630 		if (!(valid_hooks & (1 << h)))
631 			continue;
632 		if ((unsigned char *)e - base == hook_entries[h])
633 			newinfo->hook_entry[h] = hook_entries[h];
634 		if ((unsigned char *)e - base == underflows[h]) {
635 			if (!check_underflow(e))
636 				return -EINVAL;
637 
638 			newinfo->underflow[h] = underflows[h];
639 		}
640 	}
641 
642 	/* Clear counters and comefrom */
643 	e->counters = ((struct xt_counters) { 0, 0 });
644 	e->comefrom = 0;
645 	return 0;
646 }
647 
648 static void
649 cleanup_entry(struct ipt_entry *e, struct net *net)
650 {
651 	struct xt_tgdtor_param par;
652 	struct xt_entry_target *t;
653 	struct xt_entry_match *ematch;
654 
655 	/* Cleanup all matches */
656 	xt_ematch_foreach(ematch, e)
657 		cleanup_match(ematch, net);
658 	t = ipt_get_target(e);
659 
660 	par.net      = net;
661 	par.target   = t->u.kernel.target;
662 	par.targinfo = t->data;
663 	par.family   = NFPROTO_IPV4;
664 	if (par.target->destroy != NULL)
665 		par.target->destroy(&par);
666 	module_put(par.target->me);
667 	xt_percpu_counter_free(&e->counters);
668 }
669 
670 /* Checks and translates the user-supplied table segment (held in
671    newinfo) */
672 static int
673 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
674 		const struct ipt_replace *repl)
675 {
676 	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
677 	struct ipt_entry *iter;
678 	unsigned int *offsets;
679 	unsigned int i;
680 	int ret = 0;
681 
682 	newinfo->size = repl->size;
683 	newinfo->number = repl->num_entries;
684 
685 	/* Init all hooks to impossible value. */
686 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
687 		newinfo->hook_entry[i] = 0xFFFFFFFF;
688 		newinfo->underflow[i] = 0xFFFFFFFF;
689 	}
690 
691 	offsets = xt_alloc_entry_offsets(newinfo->number);
692 	if (!offsets)
693 		return -ENOMEM;
694 	i = 0;
695 	/* Walk through entries, checking offsets. */
696 	xt_entry_foreach(iter, entry0, newinfo->size) {
697 		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
698 						 entry0 + repl->size,
699 						 repl->hook_entry,
700 						 repl->underflow,
701 						 repl->valid_hooks);
702 		if (ret != 0)
703 			goto out_free;
704 		if (i < repl->num_entries)
705 			offsets[i] = (void *)iter - entry0;
706 		++i;
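		/* Each user-defined chain begins with an ERROR target carrying
		 * the chain name, so this count bounds the jump stack depth. */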
707 		if (strcmp(ipt_get_target(iter)->u.user.name,
708 		    XT_ERROR_TARGET) == 0)
709 			++newinfo->stacksize;
710 	}
711 
712 	ret = -EINVAL;
713 	if (i != repl->num_entries)
714 		goto out_free;
715 
716 	/* Check hooks all assigned */
717 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
718 		/* Only hooks which are valid */
719 		if (!(repl->valid_hooks & (1 << i)))
720 			continue;
721 		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
722 			goto out_free;
723 		if (newinfo->underflow[i] == 0xFFFFFFFF)
724 			goto out_free;
725 	}
726 
727 	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
728 		ret = -ELOOP;
729 		goto out_free;
730 	}
731 	kvfree(offsets);
732 
733 	/* Finally, each sanity check must pass */
734 	i = 0;
735 	xt_entry_foreach(iter, entry0, newinfo->size) {
736 		ret = find_check_entry(iter, net, repl->name, repl->size,
737 				       &alloc_state);
738 		if (ret != 0)
739 			break;
740 		++i;
741 	}
742 
743 	if (ret != 0) {
744 		xt_entry_foreach(iter, entry0, newinfo->size) {
745 			if (i-- == 0)
746 				break;
747 			cleanup_entry(iter, net);
748 		}
749 		return ret;
750 	}
751 
752 	return ret;
753  out_free:
754 	kvfree(offsets);
755 	return ret;
756 }
757 
758 static void
759 get_counters(const struct xt_table_info *t,
760 	     struct xt_counters counters[])
761 {
762 	struct ipt_entry *iter;
763 	unsigned int cpu;
764 	unsigned int i;
765 
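	/* Fold the per-CPU counters into the output array; each bcnt/pcnt pair
	 * is read under that CPU's xt_recseq so the 64-bit values form a
	 * consistent snapshot even while packets are updating them. */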
766 	for_each_possible_cpu(cpu) {
767 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
768 
769 		i = 0;
770 		xt_entry_foreach(iter, t->entries, t->size) {
771 			struct xt_counters *tmp;
772 			u64 bcnt, pcnt;
773 			unsigned int start;
774 
775 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
776 			do {
777 				start = read_seqcount_begin(s);
778 				bcnt = tmp->bcnt;
779 				pcnt = tmp->pcnt;
780 			} while (read_seqcount_retry(s, start));
781 
782 			ADD_COUNTER(counters[i], bcnt, pcnt);
783 			++i; /* macro does multi eval of i */
784 		}
785 	}
786 }
787 
788 static struct xt_counters *alloc_counters(const struct xt_table *table)
789 {
790 	unsigned int countersize;
791 	struct xt_counters *counters;
792 	const struct xt_table_info *private = table->private;
793 
794 	/* We need an atomic snapshot of counters: the rest doesn't change
795 	   (other than comefrom, which userspace doesn't care
796 	   about). */
797 	countersize = sizeof(struct xt_counters) * private->number;
798 	counters = vzalloc(countersize);
799 
800 	if (counters == NULL)
801 		return ERR_PTR(-ENOMEM);
802 
803 	get_counters(private, counters);
804 
805 	return counters;
806 }
807 
808 static int
809 copy_entries_to_user(unsigned int total_size,
810 		     const struct xt_table *table,
811 		     void __user *userptr)
812 {
813 	unsigned int off, num;
814 	const struct ipt_entry *e;
815 	struct xt_counters *counters;
816 	const struct xt_table_info *private = table->private;
817 	int ret = 0;
818 	const void *loc_cpu_entry;
819 
820 	counters = alloc_counters(table);
821 	if (IS_ERR(counters))
822 		return PTR_ERR(counters);
823 
824 	loc_cpu_entry = private->entries;
825 
826 	/* FIXME: use iterator macros --RR */
827 	/* ... then go back and fix counters and names */
828 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
829 		unsigned int i;
830 		const struct xt_entry_match *m;
831 		const struct xt_entry_target *t;
832 
833 		e = loc_cpu_entry + off;
834 		if (copy_to_user(userptr + off, e, sizeof(*e))) {
835 			ret = -EFAULT;
836 			goto free_counters;
837 		}
838 		if (copy_to_user(userptr + off
839 				 + offsetof(struct ipt_entry, counters),
840 				 &counters[num],
841 				 sizeof(counters[num])) != 0) {
842 			ret = -EFAULT;
843 			goto free_counters;
844 		}
845 
846 		for (i = sizeof(struct ipt_entry);
847 		     i < e->target_offset;
848 		     i += m->u.match_size) {
849 			m = (void *)e + i;
850 
851 			if (xt_match_to_user(m, userptr + off + i)) {
852 				ret = -EFAULT;
853 				goto free_counters;
854 			}
855 		}
856 
857 		t = ipt_get_target_c(e);
858 		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
859 			ret = -EFAULT;
860 			goto free_counters;
861 		}
862 	}
863 
864  free_counters:
865 	vfree(counters);
866 	return ret;
867 }
868 
869 #ifdef CONFIG_COMPAT
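/* A standard target's verdict may be a jump offset; offsets differ between
 * the compat (32-bit) and native entry layouts, so xt_compat_calc_jump()
 * translates them when copying in either direction. */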
870 static void compat_standard_from_user(void *dst, const void *src)
871 {
872 	int v = *(compat_int_t *)src;
873 
874 	if (v > 0)
875 		v += xt_compat_calc_jump(AF_INET, v);
876 	memcpy(dst, &v, sizeof(v));
877 }
878 
879 static int compat_standard_to_user(void __user *dst, const void *src)
880 {
881 	compat_int_t cv = *(int *)src;
882 
883 	if (cv > 0)
884 		cv -= xt_compat_calc_jump(AF_INET, cv);
885 	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
886 }
887 
888 static int compat_calc_entry(const struct ipt_entry *e,
889 			     const struct xt_table_info *info,
890 			     const void *base, struct xt_table_info *newinfo)
891 {
892 	const struct xt_entry_match *ematch;
893 	const struct xt_entry_target *t;
894 	unsigned int entry_offset;
895 	int off, i, ret;
896 
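	/* 'off' is how much this entry shrinks in the compat layout (entry
	 * header plus per-match and target deltas); reduce the reported table
	 * size and any hook/underflow offsets that lie beyond this entry. */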
897 	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
898 	entry_offset = (void *)e - base;
899 	xt_ematch_foreach(ematch, e)
900 		off += xt_compat_match_offset(ematch->u.kernel.match);
901 	t = ipt_get_target_c(e);
902 	off += xt_compat_target_offset(t->u.kernel.target);
903 	newinfo->size -= off;
904 	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
905 	if (ret)
906 		return ret;
907 
908 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
909 		if (info->hook_entry[i] &&
910 		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
911 			newinfo->hook_entry[i] -= off;
912 		if (info->underflow[i] &&
913 		    (e < (struct ipt_entry *)(base + info->underflow[i])))
914 			newinfo->underflow[i] -= off;
915 	}
916 	return 0;
917 }
918 
919 static int compat_table_info(const struct xt_table_info *info,
920 			     struct xt_table_info *newinfo)
921 {
922 	struct ipt_entry *iter;
923 	const void *loc_cpu_entry;
924 	int ret;
925 
926 	if (!newinfo || !info)
927 		return -EINVAL;
928 
929 	/* we don't care about newinfo->entries */
930 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
931 	newinfo->initial_entries = 0;
932 	loc_cpu_entry = info->entries;
933 	xt_compat_init_offsets(AF_INET, info->number);
934 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
935 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
936 		if (ret != 0)
937 			return ret;
938 	}
939 	return 0;
940 }
941 #endif
942 
943 static int get_info(struct net *net, void __user *user,
944 		    const int *len, int compat)
945 {
946 	char name[XT_TABLE_MAXNAMELEN];
947 	struct xt_table *t;
948 	int ret;
949 
950 	if (*len != sizeof(struct ipt_getinfo))
951 		return -EINVAL;
952 
953 	if (copy_from_user(name, user, sizeof(name)) != 0)
954 		return -EFAULT;
955 
956 	name[XT_TABLE_MAXNAMELEN-1] = '\0';
957 #ifdef CONFIG_COMPAT
958 	if (compat)
959 		xt_compat_lock(AF_INET);
960 #endif
961 	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
962 				    "iptable_%s", name);
963 	if (t) {
964 		struct ipt_getinfo info;
965 		const struct xt_table_info *private = t->private;
966 #ifdef CONFIG_COMPAT
967 		struct xt_table_info tmp;
968 
969 		if (compat) {
970 			ret = compat_table_info(private, &tmp);
971 			xt_compat_flush_offsets(AF_INET);
972 			private = &tmp;
973 		}
974 #endif
975 		memset(&info, 0, sizeof(info));
976 		info.valid_hooks = t->valid_hooks;
977 		memcpy(info.hook_entry, private->hook_entry,
978 		       sizeof(info.hook_entry));
979 		memcpy(info.underflow, private->underflow,
980 		       sizeof(info.underflow));
981 		info.num_entries = private->number;
982 		info.size = private->size;
983 		strcpy(info.name, name);
984 
985 		if (copy_to_user(user, &info, *len) != 0)
986 			ret = -EFAULT;
987 		else
988 			ret = 0;
989 
990 		xt_table_unlock(t);
991 		module_put(t->me);
992 	} else
993 		ret = -ENOENT;
994 #ifdef CONFIG_COMPAT
995 	if (compat)
996 		xt_compat_unlock(AF_INET);
997 #endif
998 	return ret;
999 }
1000 
1001 static int
1002 get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1003 	    const int *len)
1004 {
1005 	int ret;
1006 	struct ipt_get_entries get;
1007 	struct xt_table *t;
1008 
1009 	if (*len < sizeof(get))
1010 		return -EINVAL;
1011 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1012 		return -EFAULT;
1013 	if (*len != sizeof(struct ipt_get_entries) + get.size)
1014 		return -EINVAL;
1015 	get.name[sizeof(get.name) - 1] = '\0';
1016 
1017 	t = xt_find_table_lock(net, AF_INET, get.name);
1018 	if (t) {
1019 		const struct xt_table_info *private = t->private;
1020 		if (get.size == private->size)
1021 			ret = copy_entries_to_user(private->size,
1022 						   t, uptr->entrytable);
1023 		else
1024 			ret = -EAGAIN;
1025 
1026 		module_put(t->me);
1027 		xt_table_unlock(t);
1028 	} else
1029 		ret = -ENOENT;
1030 
1031 	return ret;
1032 }
1033 
1034 static int
1035 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1036 	     struct xt_table_info *newinfo, unsigned int num_counters,
1037 	     void __user *counters_ptr)
1038 {
1039 	int ret;
1040 	struct xt_table *t;
1041 	struct xt_table_info *oldinfo;
1042 	struct xt_counters *counters;
1043 	struct ipt_entry *iter;
1044 
1045 	ret = 0;
1046 	counters = vzalloc(num_counters * sizeof(struct xt_counters));
1047 	if (!counters) {
1048 		ret = -ENOMEM;
1049 		goto out;
1050 	}
1051 
1052 	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1053 				    "iptable_%s", name);
1054 	if (!t) {
1055 		ret = -ENOENT;
1056 		goto free_newinfo_counters_untrans;
1057 	}
1058 
1059 	/* You lied! */
1060 	if (valid_hooks != t->valid_hooks) {
1061 		ret = -EINVAL;
1062 		goto put_module;
1063 	}
1064 
1065 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1066 	if (!oldinfo)
1067 		goto put_module;
1068 
1069 	/* Update module usage count based on number of rules */
1070 	if ((oldinfo->number > oldinfo->initial_entries) ||
1071 	    (newinfo->number <= oldinfo->initial_entries))
1072 		module_put(t->me);
1073 	if ((oldinfo->number > oldinfo->initial_entries) &&
1074 	    (newinfo->number <= oldinfo->initial_entries))
1075 		module_put(t->me);
1076 
1077 	/* Get the old counters, and synchronize with replace */
1078 	get_counters(oldinfo, counters);
1079 
1080 	/* Decrease module usage counts and free resource */
1081 	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1082 		cleanup_entry(iter, net);
1083 
1084 	xt_free_table_info(oldinfo);
1085 	if (copy_to_user(counters_ptr, counters,
1086 			 sizeof(struct xt_counters) * num_counters) != 0) {
1087 		/* Silent error, can't fail, new table is already in place */
1088 		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
1089 	}
1090 	vfree(counters);
1091 	xt_table_unlock(t);
1092 	return ret;
1093 
1094  put_module:
1095 	module_put(t->me);
1096 	xt_table_unlock(t);
1097  free_newinfo_counters_untrans:
1098 	vfree(counters);
1099  out:
1100 	return ret;
1101 }
1102 
1103 static int
1104 do_replace(struct net *net, const void __user *user, unsigned int len)
1105 {
1106 	int ret;
1107 	struct ipt_replace tmp;
1108 	struct xt_table_info *newinfo;
1109 	void *loc_cpu_entry;
1110 	struct ipt_entry *iter;
1111 
1112 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1113 		return -EFAULT;
1114 
1115 	/* overflow check */
1116 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1117 		return -ENOMEM;
1118 	if (tmp.num_counters == 0)
1119 		return -EINVAL;
1120 
1121 	tmp.name[sizeof(tmp.name)-1] = 0;
1122 
1123 	newinfo = xt_alloc_table_info(tmp.size);
1124 	if (!newinfo)
1125 		return -ENOMEM;
1126 
1127 	loc_cpu_entry = newinfo->entries;
1128 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1129 			   tmp.size) != 0) {
1130 		ret = -EFAULT;
1131 		goto free_newinfo;
1132 	}
1133 
1134 	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1135 	if (ret != 0)
1136 		goto free_newinfo;
1137 
1138 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1139 			   tmp.num_counters, tmp.counters);
1140 	if (ret)
1141 		goto free_newinfo_untrans;
1142 	return 0;
1143 
1144  free_newinfo_untrans:
1145 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1146 		cleanup_entry(iter, net);
1147  free_newinfo:
1148 	xt_free_table_info(newinfo);
1149 	return ret;
1150 }
1151 
1152 static int
1153 do_add_counters(struct net *net, const void __user *user,
1154 		unsigned int len, int compat)
1155 {
1156 	unsigned int i;
1157 	struct xt_counters_info tmp;
1158 	struct xt_counters *paddc;
1159 	struct xt_table *t;
1160 	const struct xt_table_info *private;
1161 	int ret = 0;
1162 	struct ipt_entry *iter;
1163 	unsigned int addend;
1164 
1165 	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
1166 	if (IS_ERR(paddc))
1167 		return PTR_ERR(paddc);
1168 
1169 	t = xt_find_table_lock(net, AF_INET, tmp.name);
1170 	if (!t) {
1171 		ret = -ENOENT;
1172 		goto free;
1173 	}
1174 
1175 	local_bh_disable();
1176 	private = t->private;
1177 	if (private->number != tmp.num_counters) {
1178 		ret = -EINVAL;
1179 		goto unlock_up_free;
1180 	}
1181 
1182 	i = 0;
1183 	addend = xt_write_recseq_begin();
1184 	xt_entry_foreach(iter, private->entries, private->size) {
1185 		struct xt_counters *tmp;
1186 
1187 		tmp = xt_get_this_cpu_counter(&iter->counters);
1188 		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1189 		++i;
1190 	}
1191 	xt_write_recseq_end(addend);
1192  unlock_up_free:
1193 	local_bh_enable();
1194 	xt_table_unlock(t);
1195 	module_put(t->me);
1196  free:
1197 	vfree(paddc);
1198 
1199 	return ret;
1200 }
1201 
1202 #ifdef CONFIG_COMPAT
1203 struct compat_ipt_replace {
1204 	char			name[XT_TABLE_MAXNAMELEN];
1205 	u32			valid_hooks;
1206 	u32			num_entries;
1207 	u32			size;
1208 	u32			hook_entry[NF_INET_NUMHOOKS];
1209 	u32			underflow[NF_INET_NUMHOOKS];
1210 	u32			num_counters;
1211 	compat_uptr_t		counters;	/* struct xt_counters * */
1212 	struct compat_ipt_entry	entries[0];
1213 };
1214 
1215 static int
1216 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1217 			  unsigned int *size, struct xt_counters *counters,
1218 			  unsigned int i)
1219 {
1220 	struct xt_entry_target *t;
1221 	struct compat_ipt_entry __user *ce;
1222 	u_int16_t target_offset, next_offset;
1223 	compat_uint_t origsize;
1224 	const struct xt_entry_match *ematch;
1225 	int ret = 0;
1226 
1227 	origsize = *size;
1228 	ce = *dstptr;
1229 	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
1230 	    copy_to_user(&ce->counters, &counters[i],
1231 	    sizeof(counters[i])) != 0)
1232 		return -EFAULT;
1233 
1234 	*dstptr += sizeof(struct compat_ipt_entry);
1235 	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1236 
1237 	xt_ematch_foreach(ematch, e) {
1238 		ret = xt_compat_match_to_user(ematch, dstptr, size);
1239 		if (ret != 0)
1240 			return ret;
1241 	}
1242 	target_offset = e->target_offset - (origsize - *size);
1243 	t = ipt_get_target(e);
1244 	ret = xt_compat_target_to_user(t, dstptr, size);
1245 	if (ret)
1246 		return ret;
1247 	next_offset = e->next_offset - (origsize - *size);
1248 	if (put_user(target_offset, &ce->target_offset) != 0 ||
1249 	    put_user(next_offset, &ce->next_offset) != 0)
1250 		return -EFAULT;
1251 	return 0;
1252 }
1253 
1254 static int
1255 compat_find_calc_match(struct xt_entry_match *m,
1256 		       const struct ipt_ip *ip,
1257 		       int *size)
1258 {
1259 	struct xt_match *match;
1260 
1261 	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
1262 				      m->u.user.revision);
1263 	if (IS_ERR(match))
1264 		return PTR_ERR(match);
1265 
1266 	m->u.kernel.match = match;
1267 	*size += xt_compat_match_offset(match);
1268 	return 0;
1269 }
1270 
1271 static void compat_release_entry(struct compat_ipt_entry *e)
1272 {
1273 	struct xt_entry_target *t;
1274 	struct xt_entry_match *ematch;
1275 
1276 	/* Cleanup all matches */
1277 	xt_ematch_foreach(ematch, e)
1278 		module_put(ematch->u.kernel.match->me);
1279 	t = compat_ipt_get_target(e);
1280 	module_put(t->u.kernel.target->me);
1281 }
1282 
1283 static int
1284 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1285 				  struct xt_table_info *newinfo,
1286 				  unsigned int *size,
1287 				  const unsigned char *base,
1288 				  const unsigned char *limit)
1289 {
1290 	struct xt_entry_match *ematch;
1291 	struct xt_entry_target *t;
1292 	struct xt_target *target;
1293 	unsigned int entry_offset;
1294 	unsigned int j;
1295 	int ret, off;
1296 
1297 	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
1298 	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
1299 	    (unsigned char *)e + e->next_offset > limit)
1300 		return -EINVAL;
1301 
1302 	if (e->next_offset < sizeof(struct compat_ipt_entry) +
1303 			     sizeof(struct compat_xt_entry_target))
1304 		return -EINVAL;
1305 
1306 	if (!ip_checkentry(&e->ip))
1307 		return -EINVAL;
1308 
1309 	ret = xt_compat_check_entry_offsets(e, e->elems,
1310 					    e->target_offset, e->next_offset);
1311 	if (ret)
1312 		return ret;
1313 
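	/* 'off' accumulates how much larger the native representation of this
	 * entry will be than the compat one; xt_compat_add_offset() records it
	 * so jump targets can be fixed up during translation. */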
1314 	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1315 	entry_offset = (void *)e - (void *)base;
1316 	j = 0;
1317 	xt_ematch_foreach(ematch, e) {
1318 		ret = compat_find_calc_match(ematch, &e->ip, &off);
1319 		if (ret != 0)
1320 			goto release_matches;
1321 		++j;
1322 	}
1323 
1324 	t = compat_ipt_get_target(e);
1325 	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
1326 					t->u.user.revision);
1327 	if (IS_ERR(target)) {
1328 		ret = PTR_ERR(target);
1329 		goto release_matches;
1330 	}
1331 	t->u.kernel.target = target;
1332 
1333 	off += xt_compat_target_offset(target);
1334 	*size += off;
1335 	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1336 	if (ret)
1337 		goto out;
1338 
1339 	return 0;
1340 
1341 out:
1342 	module_put(t->u.kernel.target->me);
1343 release_matches:
1344 	xt_ematch_foreach(ematch, e) {
1345 		if (j-- == 0)
1346 			break;
1347 		module_put(ematch->u.kernel.match->me);
1348 	}
1349 	return ret;
1350 }
1351 
1352 static void
1353 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1354 			    unsigned int *size,
1355 			    struct xt_table_info *newinfo, unsigned char *base)
1356 {
1357 	struct xt_entry_target *t;
1358 	struct xt_target *target;
1359 	struct ipt_entry *de;
1360 	unsigned int origsize;
1361 	int h;
1362 	struct xt_entry_match *ematch;
1363 
1364 	origsize = *size;
1365 	de = *dstptr;
1366 	memcpy(de, e, sizeof(struct ipt_entry));
1367 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
1368 
1369 	*dstptr += sizeof(struct ipt_entry);
1370 	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1371 
1372 	xt_ematch_foreach(ematch, e)
1373 		xt_compat_match_from_user(ematch, dstptr, size);
1374 
1375 	de->target_offset = e->target_offset - (origsize - *size);
1376 	t = compat_ipt_get_target(e);
1377 	target = t->u.kernel.target;
1378 	xt_compat_target_from_user(t, dstptr, size);
1379 
1380 	de->next_offset = e->next_offset - (origsize - *size);
1381 
1382 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1383 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
1384 			newinfo->hook_entry[h] -= origsize - *size;
1385 		if ((unsigned char *)de - base < newinfo->underflow[h])
1386 			newinfo->underflow[h] -= origsize - *size;
1387 	}
1388 }
1389 
1390 static int
1391 translate_compat_table(struct net *net,
1392 		       struct xt_table_info **pinfo,
1393 		       void **pentry0,
1394 		       const struct compat_ipt_replace *compatr)
1395 {
1396 	unsigned int i, j;
1397 	struct xt_table_info *newinfo, *info;
1398 	void *pos, *entry0, *entry1;
1399 	struct compat_ipt_entry *iter0;
1400 	struct ipt_replace repl;
1401 	unsigned int size;
1402 	int ret;
1403 
1404 	info = *pinfo;
1405 	entry0 = *pentry0;
1406 	size = compatr->size;
1407 	info->number = compatr->num_entries;
1408 
1409 	j = 0;
1410 	xt_compat_lock(AF_INET);
1411 	xt_compat_init_offsets(AF_INET, compatr->num_entries);
1412 	/* Walk through entries, checking offsets. */
1413 	xt_entry_foreach(iter0, entry0, compatr->size) {
1414 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1415 							entry0,
1416 							entry0 + compatr->size);
1417 		if (ret != 0)
1418 			goto out_unlock;
1419 		++j;
1420 	}
1421 
1422 	ret = -EINVAL;
1423 	if (j != compatr->num_entries)
1424 		goto out_unlock;
1425 
1426 	ret = -ENOMEM;
1427 	newinfo = xt_alloc_table_info(size);
1428 	if (!newinfo)
1429 		goto out_unlock;
1430 
1431 	newinfo->number = compatr->num_entries;
1432 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1433 		newinfo->hook_entry[i] = compatr->hook_entry[i];
1434 		newinfo->underflow[i] = compatr->underflow[i];
1435 	}
1436 	entry1 = newinfo->entries;
1437 	pos = entry1;
1438 	size = compatr->size;
1439 	xt_entry_foreach(iter0, entry0, compatr->size)
1440 		compat_copy_entry_from_user(iter0, &pos, &size,
1441 					    newinfo, entry1);
1442 
1443 	/* all module references in entry0 are now gone.
1444 	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
1445 	 * generated by 64bit userspace.
1446 	 *
1447 	 * Call standard translate_table() to validate all hook_entrys,
1448 	 * Call standard translate_table() to validate all hook_entries,
1449 	 */
1450 	xt_compat_flush_offsets(AF_INET);
1451 	xt_compat_unlock(AF_INET);
1452 
1453 	memcpy(&repl, compatr, sizeof(*compatr));
1454 
1455 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1456 		repl.hook_entry[i] = newinfo->hook_entry[i];
1457 		repl.underflow[i] = newinfo->underflow[i];
1458 	}
1459 
1460 	repl.num_counters = 0;
1461 	repl.counters = NULL;
1462 	repl.size = newinfo->size;
1463 	ret = translate_table(net, newinfo, entry1, &repl);
1464 	if (ret)
1465 		goto free_newinfo;
1466 
1467 	*pinfo = newinfo;
1468 	*pentry0 = entry1;
1469 	xt_free_table_info(info);
1470 	return 0;
1471 
1472 free_newinfo:
1473 	xt_free_table_info(newinfo);
1474 	return ret;
1475 out_unlock:
1476 	xt_compat_flush_offsets(AF_INET);
1477 	xt_compat_unlock(AF_INET);
1478 	xt_entry_foreach(iter0, entry0, compatr->size) {
1479 		if (j-- == 0)
1480 			break;
1481 		compat_release_entry(iter0);
1482 	}
1483 	return ret;
1484 }
1485 
1486 static int
1487 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1488 {
1489 	int ret;
1490 	struct compat_ipt_replace tmp;
1491 	struct xt_table_info *newinfo;
1492 	void *loc_cpu_entry;
1493 	struct ipt_entry *iter;
1494 
1495 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1496 		return -EFAULT;
1497 
1498 	/* overflow check */
1499 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1500 		return -ENOMEM;
1501 	if (tmp.num_counters == 0)
1502 		return -EINVAL;
1503 
1504 	tmp.name[sizeof(tmp.name)-1] = 0;
1505 
1506 	newinfo = xt_alloc_table_info(tmp.size);
1507 	if (!newinfo)
1508 		return -ENOMEM;
1509 
1510 	loc_cpu_entry = newinfo->entries;
1511 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1512 			   tmp.size) != 0) {
1513 		ret = -EFAULT;
1514 		goto free_newinfo;
1515 	}
1516 
1517 	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1518 	if (ret != 0)
1519 		goto free_newinfo;
1520 
1521 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1522 			   tmp.num_counters, compat_ptr(tmp.counters));
1523 	if (ret)
1524 		goto free_newinfo_untrans;
1525 	return 0;
1526 
1527  free_newinfo_untrans:
1528 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1529 		cleanup_entry(iter, net);
1530  free_newinfo:
1531 	xt_free_table_info(newinfo);
1532 	return ret;
1533 }
1534 
1535 static int
1536 compat_do_ipt_set_ctl(struct sock *sk,	int cmd, void __user *user,
1537 		      unsigned int len)
1538 {
1539 	int ret;
1540 
1541 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1542 		return -EPERM;
1543 
1544 	switch (cmd) {
1545 	case IPT_SO_SET_REPLACE:
1546 		ret = compat_do_replace(sock_net(sk), user, len);
1547 		break;
1548 
1549 	case IPT_SO_SET_ADD_COUNTERS:
1550 		ret = do_add_counters(sock_net(sk), user, len, 1);
1551 		break;
1552 
1553 	default:
1554 		ret = -EINVAL;
1555 	}
1556 
1557 	return ret;
1558 }
1559 
1560 struct compat_ipt_get_entries {
1561 	char name[XT_TABLE_MAXNAMELEN];
1562 	compat_uint_t size;
1563 	struct compat_ipt_entry entrytable[0];
1564 };
1565 
1566 static int
1567 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1568 			    void __user *userptr)
1569 {
1570 	struct xt_counters *counters;
1571 	const struct xt_table_info *private = table->private;
1572 	void __user *pos;
1573 	unsigned int size;
1574 	int ret = 0;
1575 	unsigned int i = 0;
1576 	struct ipt_entry *iter;
1577 
1578 	counters = alloc_counters(table);
1579 	if (IS_ERR(counters))
1580 		return PTR_ERR(counters);
1581 
1582 	pos = userptr;
1583 	size = total_size;
1584 	xt_entry_foreach(iter, private->entries, total_size) {
1585 		ret = compat_copy_entry_to_user(iter, &pos,
1586 						&size, counters, i++);
1587 		if (ret != 0)
1588 			break;
1589 	}
1590 
1591 	vfree(counters);
1592 	return ret;
1593 }
1594 
1595 static int
1596 compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1597 		   int *len)
1598 {
1599 	int ret;
1600 	struct compat_ipt_get_entries get;
1601 	struct xt_table *t;
1602 
1603 	if (*len < sizeof(get))
1604 		return -EINVAL;
1605 
1606 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1607 		return -EFAULT;
1608 
1609 	if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
1610 		return -EINVAL;
1611 
1612 	get.name[sizeof(get.name) - 1] = '\0';
1613 
1614 	xt_compat_lock(AF_INET);
1615 	t = xt_find_table_lock(net, AF_INET, get.name);
1616 	if (t) {
1617 		const struct xt_table_info *private = t->private;
1618 		struct xt_table_info info;
1619 		ret = compat_table_info(private, &info);
1620 		if (!ret && get.size == info.size)
1621 			ret = compat_copy_entries_to_user(private->size,
1622 							  t, uptr->entrytable);
1623 		else if (!ret)
1624 			ret = -EAGAIN;
1625 
1626 		xt_compat_flush_offsets(AF_INET);
1627 		module_put(t->me);
1628 		xt_table_unlock(t);
1629 	} else
1630 		ret = -ENOENT;
1631 
1632 	xt_compat_unlock(AF_INET);
1633 	return ret;
1634 }
1635 
1636 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1637 
1638 static int
1639 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1640 {
1641 	int ret;
1642 
1643 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1644 		return -EPERM;
1645 
1646 	switch (cmd) {
1647 	case IPT_SO_GET_INFO:
1648 		ret = get_info(sock_net(sk), user, len, 1);
1649 		break;
1650 	case IPT_SO_GET_ENTRIES:
1651 		ret = compat_get_entries(sock_net(sk), user, len);
1652 		break;
1653 	default:
1654 		ret = do_ipt_get_ctl(sk, cmd, user, len);
1655 	}
1656 	return ret;
1657 }
1658 #endif
1659 
1660 static int
1661 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1662 {
1663 	int ret;
1664 
1665 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1666 		return -EPERM;
1667 
1668 	switch (cmd) {
1669 	case IPT_SO_SET_REPLACE:
1670 		ret = do_replace(sock_net(sk), user, len);
1671 		break;
1672 
1673 	case IPT_SO_SET_ADD_COUNTERS:
1674 		ret = do_add_counters(sock_net(sk), user, len, 0);
1675 		break;
1676 
1677 	default:
1678 		ret = -EINVAL;
1679 	}
1680 
1681 	return ret;
1682 }
1683 
1684 static int
1685 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1686 {
1687 	int ret;
1688 
1689 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1690 		return -EPERM;
1691 
1692 	switch (cmd) {
1693 	case IPT_SO_GET_INFO:
1694 		ret = get_info(sock_net(sk), user, len, 0);
1695 		break;
1696 
1697 	case IPT_SO_GET_ENTRIES:
1698 		ret = get_entries(sock_net(sk), user, len);
1699 		break;
1700 
1701 	case IPT_SO_GET_REVISION_MATCH:
1702 	case IPT_SO_GET_REVISION_TARGET: {
1703 		struct xt_get_revision rev;
1704 		int target;
1705 
1706 		if (*len != sizeof(rev)) {
1707 			ret = -EINVAL;
1708 			break;
1709 		}
1710 		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1711 			ret = -EFAULT;
1712 			break;
1713 		}
1714 		rev.name[sizeof(rev.name)-1] = 0;
1715 
1716 		if (cmd == IPT_SO_GET_REVISION_TARGET)
1717 			target = 1;
1718 		else
1719 			target = 0;
1720 
1721 		try_then_request_module(xt_find_revision(AF_INET, rev.name,
1722 							 rev.revision,
1723 							 target, &ret),
1724 					"ipt_%s", rev.name);
1725 		break;
1726 	}
1727 
1728 	default:
1729 		ret = -EINVAL;
1730 	}
1731 
1732 	return ret;
1733 }
1734 
1735 static void __ipt_unregister_table(struct net *net, struct xt_table *table)
1736 {
1737 	struct xt_table_info *private;
1738 	void *loc_cpu_entry;
1739 	struct module *table_owner = table->me;
1740 	struct ipt_entry *iter;
1741 
1742 	private = xt_unregister_table(table);
1743 
1744 	/* Decrease module usage counts and free resources */
1745 	loc_cpu_entry = private->entries;
1746 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
1747 		cleanup_entry(iter, net);
1748 	if (private->number > private->initial_entries)
1749 		module_put(table_owner);
1750 	xt_free_table_info(private);
1751 }
1752 
1753 int ipt_register_table(struct net *net, const struct xt_table *table,
1754 		       const struct ipt_replace *repl,
1755 		       const struct nf_hook_ops *ops, struct xt_table **res)
1756 {
1757 	int ret;
1758 	struct xt_table_info *newinfo;
1759 	struct xt_table_info bootstrap = {0};
1760 	void *loc_cpu_entry;
1761 	struct xt_table *new_table;
1762 
1763 	newinfo = xt_alloc_table_info(repl->size);
1764 	if (!newinfo)
1765 		return -ENOMEM;
1766 
1767 	loc_cpu_entry = newinfo->entries;
1768 	memcpy(loc_cpu_entry, repl->entries, repl->size);
1769 
1770 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
1771 	if (ret != 0)
1772 		goto out_free;
1773 
1774 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
1775 	if (IS_ERR(new_table)) {
1776 		ret = PTR_ERR(new_table);
1777 		goto out_free;
1778 	}
1779 
1780 	/* set res now, will see skbs right after nf_register_net_hooks */
1781 	WRITE_ONCE(*res, new_table);
1782 
1783 	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
1784 	if (ret != 0) {
1785 		__ipt_unregister_table(net, new_table);
1786 		*res = NULL;
1787 	}
1788 
1789 	return ret;
1790 
1791 out_free:
1792 	xt_free_table_info(newinfo);
1793 	return ret;
1794 }
1795 
1796 void ipt_unregister_table(struct net *net, struct xt_table *table,
1797 			  const struct nf_hook_ops *ops)
1798 {
1799 	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
1800 	__ipt_unregister_table(net, table);
1801 }
1802 
1803 /* Returns true if the type and code are matched by the range, false otherwise */
1804 static inline bool
1805 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1806 		     u_int8_t type, u_int8_t code,
1807 		     bool invert)
1808 {
1809 	return ((test_type == 0xFF) ||
1810 		(type == test_type && code >= min_code && code <= max_code))
1811 		^ invert;
1812 }
1813 
1814 static bool
1815 icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
1816 {
1817 	const struct icmphdr *ic;
1818 	struct icmphdr _icmph;
1819 	const struct ipt_icmp *icmpinfo = par->matchinfo;
1820 
1821 	/* Must not be a fragment. */
1822 	if (par->fragoff != 0)
1823 		return false;
1824 
1825 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
1826 	if (ic == NULL) {
1827 		/* We've been asked to examine this packet, and we
1828 		 * can't.  Hence, no choice but to drop.
1829 		 */
1830 		par->hotdrop = true;
1831 		return false;
1832 	}
1833 
1834 	return icmp_type_code_match(icmpinfo->type,
1835 				    icmpinfo->code[0],
1836 				    icmpinfo->code[1],
1837 				    ic->type, ic->code,
1838 				    !!(icmpinfo->invflags&IPT_ICMP_INV));
1839 }
1840 
1841 static int icmp_checkentry(const struct xt_mtchk_param *par)
1842 {
1843 	const struct ipt_icmp *icmpinfo = par->matchinfo;
1844 
1845 	/* Must specify no unknown invflags */
1846 	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
1847 }
1848 
1849 static struct xt_target ipt_builtin_tg[] __read_mostly = {
1850 	{
1851 		.name             = XT_STANDARD_TARGET,
1852 		.targetsize       = sizeof(int),
1853 		.family           = NFPROTO_IPV4,
1854 #ifdef CONFIG_COMPAT
1855 		.compatsize       = sizeof(compat_int_t),
1856 		.compat_from_user = compat_standard_from_user,
1857 		.compat_to_user   = compat_standard_to_user,
1858 #endif
1859 	},
1860 	{
1861 		.name             = XT_ERROR_TARGET,
1862 		.target           = ipt_error,
1863 		.targetsize       = XT_FUNCTION_MAXNAMELEN,
1864 		.family           = NFPROTO_IPV4,
1865 	},
1866 };
1867 
1868 static struct nf_sockopt_ops ipt_sockopts = {
1869 	.pf		= PF_INET,
1870 	.set_optmin	= IPT_BASE_CTL,
1871 	.set_optmax	= IPT_SO_SET_MAX+1,
1872 	.set		= do_ipt_set_ctl,
1873 #ifdef CONFIG_COMPAT
1874 	.compat_set	= compat_do_ipt_set_ctl,
1875 #endif
1876 	.get_optmin	= IPT_BASE_CTL,
1877 	.get_optmax	= IPT_SO_GET_MAX+1,
1878 	.get		= do_ipt_get_ctl,
1879 #ifdef CONFIG_COMPAT
1880 	.compat_get	= compat_do_ipt_get_ctl,
1881 #endif
1882 	.owner		= THIS_MODULE,
1883 };
1884 
1885 static struct xt_match ipt_builtin_mt[] __read_mostly = {
1886 	{
1887 		.name       = "icmp",
1888 		.match      = icmp_match,
1889 		.matchsize  = sizeof(struct ipt_icmp),
1890 		.checkentry = icmp_checkentry,
1891 		.proto      = IPPROTO_ICMP,
1892 		.family     = NFPROTO_IPV4,
1893 	},
1894 };
1895 
1896 static int __net_init ip_tables_net_init(struct net *net)
1897 {
1898 	return xt_proto_init(net, NFPROTO_IPV4);
1899 }
1900 
1901 static void __net_exit ip_tables_net_exit(struct net *net)
1902 {
1903 	xt_proto_fini(net, NFPROTO_IPV4);
1904 }
1905 
1906 static struct pernet_operations ip_tables_net_ops = {
1907 	.init = ip_tables_net_init,
1908 	.exit = ip_tables_net_exit,
1909 };
1910 
1911 static int __init ip_tables_init(void)
1912 {
1913 	int ret;
1914 
1915 	ret = register_pernet_subsys(&ip_tables_net_ops);
1916 	if (ret < 0)
1917 		goto err1;
1918 
1919 	/* No one else will be downing sem now, so we won't sleep */
1920 	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
1921 	if (ret < 0)
1922 		goto err2;
1923 	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
1924 	if (ret < 0)
1925 		goto err4;
1926 
1927 	/* Register setsockopt */
1928 	ret = nf_register_sockopt(&ipt_sockopts);
1929 	if (ret < 0)
1930 		goto err5;
1931 
1932 	pr_info("(C) 2000-2006 Netfilter Core Team\n");
1933 	return 0;
1934 
1935 err5:
1936 	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
1937 err4:
1938 	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
1939 err2:
1940 	unregister_pernet_subsys(&ip_tables_net_ops);
1941 err1:
1942 	return ret;
1943 }
1944 
1945 static void __exit ip_tables_fini(void)
1946 {
1947 	nf_unregister_sockopt(&ipt_sockopts);
1948 
1949 	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
1950 	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
1951 	unregister_pernet_subsys(&ip_tables_net_ops);
1952 }
1953 
1954 EXPORT_SYMBOL(ipt_register_table);
1955 EXPORT_SYMBOL(ipt_unregister_table);
1956 EXPORT_SYMBOL(ipt_do_table);
1957 module_init(ip_tables_init);
1958 module_exit(ip_tables_fini);
1959