#ifndef _X_TABLES_H
#define _X_TABLES_H

#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>

/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:	the match extension
 * @target:	the target extension
 * @matchinfo:	per-match data
 * @targinfo:	per-target data
 * @net:	network namespace through which the action was invoked
 * @in:		input netdevice
 * @out:	output netdevice
 * @fragoff:	packet is a fragment, this is the data offset
 * @thoff:	position of transport header relative to skb->data
 * @hooknum:	hook number through which the packet arrived
 * @family:	actual NFPROTO_* through which the function is invoked
 *		(helpful when match->family == NFPROTO_UNSPEC)
 *
 * Fields written to by extensions:
 *
 * @hotdrop:	drop packet if we had inspection problems
 */
struct xt_action_param {
	union {
		const struct xt_match *match;
		const struct xt_target *target;
	};
	union {
		const void *matchinfo, *targinfo;
	};
	struct net *net;
	const struct net_device *in, *out;
	int fragoff;
	unsigned int thoff;
	unsigned int hooknum;
	u_int8_t family;
	bool hotdrop;
};
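
/*
 * Example (illustrative sketch, not part of this API): a match function
 * consuming the fields above.  The "foo" name, struct xt_foo_mtinfo and
 * its members are hypothetical.
 *
 *	static bool foo_mt(const struct sk_buff *skb,
 *			   struct xt_action_param *par)
 *	{
 *		const struct xt_foo_mtinfo *info = par->matchinfo;
 *		const struct udphdr *uh;
 *		struct udphdr _uh;
 *
 *		if (par->fragoff != 0)		// match head fragments only
 *			return false;
 *		uh = skb_header_pointer(skb, par->thoff, sizeof(_uh), &_uh);
 *		if (uh == NULL) {
 *			par->hotdrop = true;	// truncated transport header
 *			return false;
 *		}
 *		return uh->dest == info->port;	// port kept in network order
 *	}
 */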

/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:	network namespace through which the check was invoked
 * @table:	table into which the rule is being inserted
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @hook_mask:	via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
	bool nft_compat;
};
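
/*
 * Example (sketch): a checkentry function for the hypothetical "foo"
 * match, validating user-supplied flags and restricting the hooks the
 * rule may be attached to.  XT_FOO_VALID_FLAGS is made up.
 *
 *	static int foo_mt_check(const struct xt_mtchk_param *par)
 *	{
 *		const struct xt_foo_mtinfo *info = par->matchinfo;
 *
 *		if (info->flags & ~XT_FOO_VALID_FLAGS)	// unknown flag bits
 *			return -EINVAL;
 *		if (par->hook_mask & ~((1 << NF_INET_LOCAL_IN) |
 *				       (1 << NF_INET_FORWARD)))
 *			return -EINVAL;	// only INPUT/FORWARD supported
 *		return 0;
 *	}
 */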

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
	struct net *net;
	const struct xt_match *match;
	void *matchinfo;
	u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields as above.
 */
struct xt_tgchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
	bool nft_compat;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
	struct net *net;
	const struct xt_target *target;
	void *targinfo;
	u_int8_t family;
};

struct xt_match {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Return true or false: return false and set par->hotdrop = true
	   to force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skbs, using skb_header_pointer and
	   skb_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      struct xt_action_param *);

	/* Called when a user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when the userspace alignment differs from the kernel's */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int matchsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
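
/*
 * Example (sketch): tying the hypothetical "foo" callbacks together.
 * The field values are illustrative only.
 *
 *	static struct xt_match foo_mt_reg __read_mostly = {
 *		.name       = "foo",
 *		.revision   = 0,
 *		.family     = NFPROTO_UNSPEC,
 *		.match      = foo_mt,
 *		.checkentry = foo_mt_check,
 *		.matchsize  = sizeof(struct xt_foo_mtinfo),
 *		.me         = THIS_MODULE,
 *	};
 */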

/* Registration hooks for targets. */
struct xt_target {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_action_param *);

	/* Called when a user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return 0 on success or an error code otherwise (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when the userspace alignment differs from the kernel's */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int targetsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
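
/*
 * Example (sketch): a target function for a hypothetical "FOO" target
 * that stamps the packet mark and lets rule traversal continue.
 *
 *	static unsigned int foo_tg(struct sk_buff *skb,
 *				   const struct xt_action_param *par)
 *	{
 *		const struct xt_foo_tginfo *info = par->targinfo;
 *
 *		skb->mark = info->mark;	// hypothetical per-rule mark value
 *		return XT_CONTINUE;	// keep evaluating later rules
 *	}
 */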

/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};
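
/*
 * Example (sketch): a table definition in the style of iptable_filter;
 * FILTER_VALID_HOOKS stands in for the mask of hooks the table handles.
 *
 *	static const struct xt_table packet_filter = {
 *		.name		= "filter",
 *		.valid_hooks	= FILTER_VALID_HOOKS,
 *		.me		= THIS_MODULE,
 *		.af		= NFPROTO_IPV4,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 */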

#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;
	void ***jumpstack;

	unsigned char entries[0] __aligned(8);
};

int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);
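
/*
 * Example (sketch): registering the hypothetical "foo" match from
 * module init and tearing it down on exit.
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_match(&foo_mt_reg);
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_match(&foo_mt_reg);
 *	}
 *	module_init(foo_mt_init);
 *	module_exit(foo_mt_exit);
 */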

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
		    bool inv_proto);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
				       unsigned int num_counters,
				       struct xt_table_info *newinfo,
				       int *error);

struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err);

struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);

/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry()
 * because we use the normal seqcount convention:
 * the low order bit is set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);
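
/*
 * Reader-side usage (sketch), mirroring the get_counters() pattern in
 * the ip(6)tables core; "tmp" stands for one cpu's xt_counters.
 *
 *	seqcount_t *s = &per_cpu(xt_recseq, cpu);
 *	unsigned int start;
 *	u64 bcnt, pcnt;
 *
 *	do {
 *		start = read_seqcount_begin(s);
 *		bcnt = tmp->bcnt;
 *		pcnt = tmp->pcnt;
 *	} while (read_seqcount_retry(s, start));
 */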

/* xt_tee_enabled - true if x_tables needs to handle reentrancy
 *
 * Enabled if the current ip(6)tables ruleset has at least one -j TEE rule.
 */
extern struct static_key xt_tee_enabled;

/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait until the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;

	/*
	 * Low order bit of sequence is set if we already
	 * called xt_write_recseq_begin().
	 */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

	/*
	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
	 * We don't check the addend value, to avoid a test and conditional
	 * jump, since addend is most likely 1.
	 */
	__this_cpu_add(xt_recseq.sequence, addend);
	smp_wmb();

	return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}
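
/*
 * Typical writer-side usage (sketch), mirroring the pattern in
 * ipt_do_table() and friends:
 *
 *	unsigned int addend;
 *
 *	local_bh_disable();
 *	addend = xt_write_recseq_begin();
 *	// ... traverse the ruleset, bump rule counters ...
 *	xt_write_recseq_end(addend);
 *	local_bh_enable();
 */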

/*
 * Compare two IFNAMSIZ-sized interface names word by word under a mask;
 * returns 0 iff all unmasked bytes are equal.  This helper is
 * performance critical and must be inlined.
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}
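
/*
 * Example (simplified sketch): matching a packet's input device against
 * a rule's stored name/mask pair, as the ip_tables match path does;
 * indev, iniface and iniface_mask stand in for the real fields.
 *
 *	if (ifname_compare_aligned(indev, iniface, iniface_mask) != 0)
 *		return false;	// wrong interface, rule does not match
 */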

/* On SMP, ip(6)t_entry->counters.pcnt holds the address of the
 * real (percpu) counter.  On !SMP, it's just the packet count,
 * so nothing needs to be done there.
 *
 * xt_percpu_counter_alloc returns the address of the percpu
 * counter, or 0 on !SMP. We force an alignment of 16 bytes
 * so that bytes/packets share a common cache line.
 *
 * Hence the caller must use IS_ERR_VALUE to check for an error; this
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
static inline u64 xt_percpu_counter_alloc(void)
{
	if (nr_cpu_ids > 1) {
		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
						    sizeof(struct xt_counters));

		if (res == NULL)
			return (u64) -ENOMEM;

		return (u64) (__force unsigned long) res;
	}

	return 0;
}

static inline void xt_percpu_counter_free(u64 pcnt)
{
	if (nr_cpu_ids > 1)
		free_percpu((void __percpu *) (unsigned long) pcnt);
}

static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
	if (nr_cpu_ids > 1)
		return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);

	return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
	if (nr_cpu_ids > 1)
		return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);

	return cnt;
}
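
/*
 * Example (sketch): the alloc/use/free pattern for per-rule counters,
 * following the ip(6)tables core; "e" is a rule entry with an embedded
 * struct xt_counters, and ADD_COUNTER is the helper macro from the
 * per-family *_tables headers.
 *
 *	e->counters.pcnt = xt_percpu_counter_alloc();
 *	if (IS_ERR_VALUE(e->counters.pcnt))
 *		return -ENOMEM;
 *
 *	// in the packet path:
 *	struct xt_counters *counter = xt_get_this_cpu_counter(&e->counters);
 *	ADD_COUNTER(*counter, skb->len, 1);
 *
 *	// on rule teardown:
 *	xt_percpu_counter_free(e->counters.pcnt);
 */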

struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match {
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	unsigned char data[0];
};

struct compat_xt_entry_target {
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};

/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
	compat_u64 pcnt, bcnt;			/* Packet and byte counters */
};

struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};

struct _compat_xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size);
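
/*
 * Sketch of how the match size delta is computed, mirroring the
 * x_tables core: the native blob is padded with XT_ALIGN from the uapi
 * header, the compat blob with COMPAT_XT_ALIGN.
 *
 *	int xt_compat_match_offset(const struct xt_match *match)
 *	{
 *		u_int16_t csize = match->compatsize ? : match->matchsize;
 *
 *		return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
 *	}
 */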

#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */