// SPDX-License-Identifier: GPL-2.0-only
/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	WARN_ON(!master_help);
	WARN_ON(timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del_rcu(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

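/* Timer callback: the expectation expired without ever being matched, so
 * take it off the hash and per-master lists and drop the timer's reference.
 */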
static void nf_ct_expectation_timed_out(struct timer_list *t)
{
	struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);
}

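/* Hash expectations by the destination part of their tuple, mixing in a
 * boot-time random seed and the netns hash so bucket placement is not
 * predictable from outside.
 */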
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash, seed;

	get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

	seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ seed);

	return reciprocal_scale(hash, nf_ct_expect_hsize);
}

static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_expect *i,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
	       net_eq(net, nf_ct_net(i->master)) &&
	       nf_ct_zone_equal_any(i->master, zone);
}

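/* Cancel the timeout and, if we won that race, unlink the expectation and
 * drop the reference that the now-cancelled timer was holding.  Returns
 * false if the timer had already fired or was never armed.
 */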
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);

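/* Lookup without taking a reference; the caller must hold rcu_read_lock()
 * and may only use the result within the read-side critical section.
 */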
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
		    const struct nf_conntrack_zone *zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
		if (nf_ct_exp_equal(tuple, i, zone, net))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
		      const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !refcount_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from the
 * global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
		       const struct nf_conntrack_zone *zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_exp_equal(tuple, i, zone, net)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (ie. the packet hasn't
	   left this machine yet), how can the other end know about the
	   expected connection?  Hence these are not the droids you are
	   looking for (if the master ct never got confirmed, we'd hold a
	   reference to it and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid a race with another CPU that, for this exp->master ct, is
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout or
	 * early_drop().
	 *
	 * If atomic_inc_not_zero() fails, we know the ct is being destroyed.
	 * If it succeeds, the ct cannot disappear underneath us.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		refcount_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		nf_ct_remove_expect(exp);
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Two expectations clash if the parts of their tuples covered by the
	   intersection of both masks are equal. */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

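/* NF_CT_EXP_F_SKIP_MASTER lets the caller treat expectations from any
 * master conntrack as a match; otherwise both must share the same master.
 */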
static bool master_matches(const struct nf_conntrack_expect *a,
			   const struct nf_conntrack_expect *b,
			   unsigned int flags)
{
	if (flags & NF_CT_EXP_F_SKIP_MASTER)
		return true;

	return a->master == b->master;
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_remove_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't take a reference on the master conntrack for non-fulfilled
 * expectations.  During conntrack destruction, the expectations are always
 * killed before the conntrack itself.
 */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	refcount_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

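/* Fill in tuple and mask of a freshly allocated expectation.  @saddr and
 * @src may be NULL to wildcard the source address and port/id; @daddr and
 * @dst are required.  Helpers typically follow nf_ct_expect_alloc() with
 * this, then nf_ct_expect_related(), then nf_ct_expect_put().
 */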
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#if IS_ENABLED(CONFIG_NF_NAT)
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (refcount_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

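/* Link the expectation into the global hash table and the master's helper
 * list and arm its timeout.  Caller holds nf_conntrack_expect_lock.
 */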
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

	/* two references: one for hash insert, one for the timer */
	refcount_add(2, &exp->use);

	timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
	net->ct.expect_count++;

	NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last)
		nf_ct_remove_expect(last);
}

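/* Check a new expectation against existing ones: replace an exact duplicate
 * from the same master and class, reject clashes, and enforce the per-helper
 * and global limits.  Returns 0 if it may be inserted.  Caller holds
 * nf_conntrack_expect_lock.
 */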
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
				       unsigned int flags)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 0;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(net, &expect->tuple);
	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
		if (master_matches(i, expect, flags) &&
		    expect_matches(i, expect)) {
			if (i->class != expect->class ||
			    i->master != expect->master)
				return -EALREADY;

			if (nf_ct_remove_expect(i))
				break;
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
						>= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

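/* Validate and insert a new expectation, then report IPEXP_NEW to any
 * userspace listeners.  Returns 0 on success or a negative errno.
 */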
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report, unsigned int flags)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect, flags);
	if (ret < 0)
		goto out;

	nf_ct_expect_insert(expect);

	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return 0;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

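/* Walk every expectation in the table, in all network namespaces, and
 * remove those for which @iter returns true.
 */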
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
				  void *data)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *next;
	unsigned int i;

	spin_lock_bh(&nf_conntrack_expect_lock);

	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_safe(exp, next,
					  &nf_ct_expect_hash[i],
					  hnode) {
			if (iter(exp, data) && del_timer(&exp->timeout)) {
				nf_ct_unlink_expect(exp);
				nf_ct_expect_put(exp);
			}
		}
	}

	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy);

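/* Like nf_ct_expect_iterate_destroy(), but only visits expectations that
 * belong to @net and reports the removal with the given @portid/@report.
 */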
void nf_ct_expect_iterate_net(struct net *net,
			      bool (*iter)(struct nf_conntrack_expect *e, void *data),
			      void *data,
			      u32 portid, int report)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *next;
	unsigned int i;

	spin_lock_bh(&nf_conntrack_expect_lock);

	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_safe(exp, next,
					  &nf_ct_expect_hash[i],
					  hnode) {

			if (!net_eq(nf_ct_exp_net(exp), net))
				continue;

			if (iter(exp, data) && del_timer(&exp->timeout)) {
				nf_ct_unlink_expect_report(exp, portid, report);
				nf_ct_expect_put(exp);
			}
		}
	}

	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

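/* One line per expectation: remaining timeout in seconds (or "-" if no
 * timer was set up), l3/l4 protocol numbers, the expected tuple, any flags
 * and the helper name plus expectation policy name.
 */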
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_puts(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    nf_ct_l4proto_find(expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_puts(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	seq_putc(s, '\n');

	return 0;
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;

	proc = proc_create_net("nf_conntrack_expect", 0440, net->proc_net,
			&exp_seq_ops, sizeof(struct ct_expect_iter_state));
	if (!proc)
		return -ENOMEM;

	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	net->ct.expect_count = 0;
	return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
}

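/* Module init: size the expectation hash at 1/256th of the main conntrack
 * table (at least one bucket), cap the total number of expectations at four
 * times the bucket count and set up the slab cache and hash table.
 */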
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
				sizeof(struct nf_conntrack_expect),
				0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;

	nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (!nf_ct_expect_hash) {
		kmem_cache_destroy(nf_ct_expect_cachep);
		return -ENOMEM;
	}

	return 0;
}

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
	kvfree(nf_ct_expect_hash);
}