xref: /openbmc/linux/net/netfilter/nf_conncount.c (revision 54921e9a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

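/* The key space is hashed across CONNCOUNT_SLOTS rb-trees, each protected
 * by its own nf_conncount_locks[] entry.  CONNCOUNT_GC_MAX_NODES bounds how
 * many stale entries a single traversal may reclaim, so lookups never stall
 * on garbage collection.  MAX_KEYLEN caps the key size, in u32 words
 * (20 bytes: enough for an IPv6 address plus one extra word).
 */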
#define CONNCOUNT_SLOTS		256U

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5

/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct list_head		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
	int				cpu;
	u32				jiffies32;
};

struct nf_conncount_rb {
	struct rb_node node;
	struct nf_conncount_list list;
	u32 key[MAX_KEYLEN];
	struct rcu_head rcu_head;
};

static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;

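/* Per-instance accounting state, handed out by nf_conncount_init(): one
 * rb-tree root per hash slot, plus state for the deferred gc worker
 * (pending_trees marks the slots the worker still has to scan).
 */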
struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
	struct net *net;
	struct work_struct gc_work;
	unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
	unsigned int gc_tree;
};

static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}

static void conn_free(struct nf_conncount_list *list,
		      struct nf_conncount_tuple *conn)
{
	lockdep_assert_held(&list->list_lock);

	list->count--;
	list_del(&conn->node);

	kmem_cache_free(conncount_conn_cachep, conn);
}

static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
	      struct nf_conncount_tuple *conn)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	u32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed.  In this case, nf_conntrack_find()
	 * returns no result.  Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
	age = a - b;
	if (conn->cpu == cpu || age >= 2) {
		conn_free(list, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}

static int __nf_conncount_add(struct net *net,
			      struct nf_conncount_list *list,
			      const struct nf_conntrack_tuple *tuple,
			      const struct nf_conntrack_zone *zone)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collect = 0;

	if (time_is_after_eq_jiffies((unsigned long)list->last_gc))
		goto add_new_node;

	/* check the saved connections */
	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		if (collect > CONNCOUNT_GC_MAX_NODES)
			break;

		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					return 0; /* already exists */
			} else {
				collect++;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 *
			 * Attempt to avoid a re-add in this case.
			 */
			nf_ct_put(found_ct);
			return 0;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collect++;
			continue;
		}

		nf_ct_put(found_ct);
	}

add_new_node:
	if (WARN_ON_ONCE(list->count > INT_MAX))
		return -EOVERFLOW;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return -ENOMEM;

	conn->tuple = *tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	list_add_tail(&conn->node, &list->head);
	list->count++;
	list->last_gc = (u32)jiffies;
	return 0;
}

int nf_conncount_add(struct net *net,
		     struct nf_conncount_list *list,
		     const struct nf_conntrack_tuple *tuple,
		     const struct nf_conntrack_zone *zone)
{
	int ret;

	/* check the saved connections */
	spin_lock_bh(&list->list_lock);
	ret = __nf_conncount_add(net, list, tuple, zone);
	spin_unlock_bh(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);
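
/* Example (illustrative sketch only, loosely following how nft_connlimit
 * drives this list-level API; the surrounding object, "tuple"/"zone"
 * variables and error handling are assumptions, not part of this file):
 *
 *	struct nf_conncount_list list;
 *	unsigned int count;
 *
 *	nf_conncount_list_init(&list);
 *	if (nf_conncount_add(net, &list, tuple, zone) < 0)
 *		return false;		// -ENOMEM/-EOVERFLOW: treat as hotdrop
 *	count = list.count;		// connections currently on the list
 *	...
 *	nf_conncount_cache_free(&list);	// on teardown
 */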

void nf_conncount_list_init(struct nf_conncount_list *list)
{
	spin_lock_init(&list->list_lock);
	INIT_LIST_HEAD(&list->head);
	list->count = 0;
	list->last_gc = (u32)jiffies;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);

/* Return true if the list is empty. Must be called with BH disabled. */
bool nf_conncount_gc_list(struct net *net,
			  struct nf_conncount_list *list)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collected = 0;
	bool ret = false;

	/* don't bother if we just did GC */
	if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc)))
		return false;

	/* don't bother if other cpu is already doing GC */
	if (!spin_trylock(&list->list_lock))
		return false;

	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			if (PTR_ERR(found) == -ENOENT)
				collected++;
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);
		if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collected++;
			continue;
		}

		nf_ct_put(found_ct);
		if (collected > CONNCOUNT_GC_MAX_NODES)
			break;
	}

	if (!list->count)
		ret = true;
	list->last_gc = (u32)jiffies;
	spin_unlock(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);

static void __tree_nodes_free(struct rcu_head *h)
{
	struct nf_conncount_rb *rbconn;

	rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
	kmem_cache_free(conncount_rb_cachep, rbconn);
}

/* caller must hold tree nf_conncount_locks[] lock */
static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		spin_lock(&rbconn->list.list_lock);
		if (!rbconn->list.count) {
			rb_erase(&rbconn->node, root);
			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
		}
		spin_unlock(&rbconn->list.list_lock);
	}
}

static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
	set_bit(tree, data->pending_trees);
	schedule_work(&data->gc_work);
}

static unsigned int
insert_tree(struct net *net,
	    struct nf_conncount_data *data,
	    struct rb_root *root,
	    unsigned int hash,
	    const u32 *key,
	    const struct nf_conntrack_tuple *tuple,
	    const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct nf_conncount_rb *rbconn;
	struct nf_conncount_tuple *conn;
	unsigned int count = 0, gc_count = 0;
	bool do_gc = true;

	spin_lock_bh(&nf_conncount_locks[hash]);
restart:
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, data->keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			int ret;

			ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
			if (ret)
				count = 0; /* hotdrop */
			else
				count = rbconn->list.count;
			tree_nodes_free(root, gc_nodes, gc_count);
			goto out_unlock;
		}

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
		schedule_gc_worker(data, hash);
		gc_count = 0;
		do_gc = false;
		goto restart;
	}

	/* no match found: insert a new node for this key */
	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		goto out_unlock;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(conncount_rb_cachep, rbconn);
		goto out_unlock;
	}

	conn->tuple = *tuple;
	conn->zone = *zone;
	memcpy(rbconn->key, key, sizeof(u32) * data->keylen);

	nf_conncount_list_init(&rbconn->list);
	list_add(&conn->node, &rbconn->list.head);
	count = 1;
	rbconn->list.count = count;

	rb_link_node_rcu(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
out_unlock:
	spin_unlock_bh(&nf_conncount_locks[hash]);
	return count;
}

static unsigned int
count_tree(struct net *net,
	   struct nf_conncount_data *data,
	   const u32 *key,
	   const struct nf_conntrack_tuple *tuple,
	   const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	struct rb_node *parent;
	struct nf_conncount_rb *rbconn;
	unsigned int hash;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	parent = rcu_dereference_raw(root->rb_node);
	while (parent) {
		int diff;

		rbconn = rb_entry(parent, struct nf_conncount_rb, node);

		diff = key_diff(key, rbconn->key, data->keylen);
		if (diff < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
		} else if (diff > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			int ret;

			if (!tuple) {
				nf_conncount_gc_list(net, &rbconn->list);
				return rbconn->list.count;
			}

			spin_lock_bh(&rbconn->list.list_lock);
			/* Node might be about to be free'd.
			 * We need to defer to insert_tree() in this case.
			 */
			if (rbconn->list.count == 0) {
				spin_unlock_bh(&rbconn->list.list_lock);
				break;
			}

			/* same source network -> be counted! */
			ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
			spin_unlock_bh(&rbconn->list.list_lock);
			if (ret)
				return 0; /* hotdrop */
			else
				return rbconn->list.count;
		}
	}

	if (!tuple)
		return 0;

	return insert_tree(net, data, root, hash, key, tuple, zone);
}

static void tree_gc_worker(struct work_struct *work)
{
	struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
	struct rb_root *root;
	struct rb_node *node;
	unsigned int tree, next_tree, gc_count = 0;

	tree = data->gc_tree % CONNCOUNT_SLOTS;
	root = &data->root[tree];

	local_bh_disable();
	rcu_read_lock();
	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		if (nf_conncount_gc_list(data->net, &rbconn->list))
			gc_count++;
	}
	rcu_read_unlock();
	local_bh_enable();

	cond_resched();

	spin_lock_bh(&nf_conncount_locks[tree]);
	if (gc_count < ARRAY_SIZE(gc_nodes))
		goto next; /* do not bother */

	gc_count = 0;
	node = rb_first(root);
	while (node != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		node = rb_next(node);

		if (rbconn->list.count > 0)
			continue;

		gc_nodes[gc_count++] = rbconn;
		if (gc_count >= ARRAY_SIZE(gc_nodes)) {
			tree_nodes_free(root, gc_nodes, gc_count);
			gc_count = 0;
		}
	}

	tree_nodes_free(root, gc_nodes, gc_count);
next:
	clear_bit(tree, data->pending_trees);

	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

	if (next_tree < CONNCOUNT_SLOTS) {
		data->gc_tree = next_tree;
		schedule_work(work);
	}

	spin_unlock_bh(&nf_conncount_locks[tree]);
}

/* Count and return number of conntrack entries in 'net' with particular 'key'.
 * If 'tuple' is not null, insert it into the accounting data structure.
 * Call with RCU read lock.
 */
unsigned int nf_conncount_count(struct net *net,
				struct nf_conncount_data *data,
				const u32 *key,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone)
{
	return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
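
/* Example (illustrative sketch, modeled on the xt_connlimit match, a real
 * caller of this API; "mask", "limit" and "tuple_ptr" (the skb's conntrack
 * tuple) are assumptions used for illustration only):
 *
 *	u32 key[MAX_KEYLEN];
 *	unsigned int connections;
 *
 *	memset(key, 0, sizeof(key));
 *	key[0] = ip_hdr(skb)->saddr & mask;	// count per source network
 *
 *	connections = nf_conncount_count(net, data, key, tuple_ptr, zone);
 *	if (connections == 0)
 *		return false;			// insertion failed: hotdrop
 *	return connections <= limit;		// accept while under the limit
 */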

struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
					    unsigned int keylen)
{
	struct nf_conncount_data *data;
	int ret, i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = nf_ct_netns_get(net, family);
	if (ret < 0) {
		kfree(data);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);
	data->net = net;
	INIT_WORK(&data->gc_work, tree_gc_worker);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);
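
/* Example (sketch): instantiate accounting keyed on one IPv4 address.
 * keylen is in bytes and must be a non-zero multiple of sizeof(u32), at
 * most MAX_KEYLEN words; failures are reported via ERR_PTR():
 *
 *	struct nf_conncount_data *data;
 *
 *	data = nf_conncount_init(net, NFPROTO_IPV4, sizeof(u32));
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	...
 *	nf_conncount_destroy(net, NFPROTO_IPV4, data);
 */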

void nf_conncount_cache_free(struct nf_conncount_list *list)
{
	struct nf_conncount_tuple *conn, *conn_n;

	list_for_each_entry_safe(conn, conn_n, &list->head, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->list);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

void nf_conncount_destroy(struct net *net, unsigned int family,
			  struct nf_conncount_data *data)
{
	unsigned int i;

	cancel_work_sync(&data->gc_work);
	nf_ct_netns_put(net, family);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
	int i;

	for (i = 0; i < CONNCOUNT_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
					   sizeof(struct nf_conncount_tuple),
					   0, 0, NULL);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
					   sizeof(struct nf_conncount_rb),
					   0, 0, NULL);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");