xref: /openbmc/linux/net/netfilter/nf_conncount.c (revision ba61bb17)
/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define CONNCOUNT_SLOTS		256U

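/* Each hash slot is protected by one of the nf_conncount_locks below,
 * selected as hash % CONNCOUNT_LOCK_SLOTS.  With lockdep enabled the lock
 * table shrinks to 8 entries so that lockdep's lock tracking stays cheap;
 * the BUILD_BUG_ON()s in nf_conncount_modinit() guarantee the slot count
 * is a multiple of the lock count, keeping the mapping even.
 */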
#ifdef CONFIG_LOCKDEP
#define CONNCOUNT_LOCK_SLOTS	8U
#else
#define CONNCOUNT_LOCK_SLOTS	256U
#endif

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5

/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct hlist_node		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
};

struct nf_conncount_rb {
	struct rb_node node;
	struct hlist_head hhead; /* connections/hosts in same subnet */
	u32 key[MAX_KEYLEN];
};

static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;

struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
};
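
/* The accounting data forms a three-level structure: 'root' hashes each
 * key into one of CONNCOUNT_SLOTS rbtree slots, every rbtree orders its
 * nf_conncount_rb nodes by the full key, and each node keeps an hlist of
 * the nf_conncount_tuple entries currently counted against that key.
 */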

static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

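/* Per the Nov 2002 note in the header: a TCP connection in TIME_WAIT or
 * CLOSE state is treated as already gone and is excluded from the count.
 */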
static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}

bool nf_conncount_add(struct hlist_head *head,
		      const struct nf_conntrack_tuple *tuple,
		      const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_tuple *conn;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return false;
	conn->tuple = *tuple;
	conn->zone = *zone;
	hlist_add_head(&conn->node, head);
	return true;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);

unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
				 const struct nf_conntrack_tuple *tuple,
				 const struct nf_conntrack_zone *zone,
				 bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn;
	struct hlist_node *n;
	struct nf_conn *found_ct;
	unsigned int length = 0;

	*addit = tuple ? true : false;

	/* check the saved connections */
	hlist_for_each_entry_safe(conn, n, head, node) {
		found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
		if (found == NULL) {
			hlist_del(&conn->node);
			kmem_cache_free(conncount_conn_cachep, conn);
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * Just to be sure we have it only once in the list.
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * We do not care about connections that are already
			 * closed -> ditch this one.
			 */
			nf_ct_put(found_ct);
			hlist_del(&conn->node);
			kmem_cache_free(conncount_conn_cachep, conn);
			continue;
		}

		nf_ct_put(found_ct);
		length++;
	}

	return length;
}
EXPORT_SYMBOL_GPL(nf_conncount_lookup);
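
/* A minimal usage sketch, mirroring the pattern count_tree() uses below:
 * how a caller might combine nf_conncount_lookup() and nf_conncount_add()
 * on its own list head ('head', 'tuple' and 'zone' stand for whatever the
 * caller tracks):
 *
 *	bool addit;
 *	unsigned int count;
 *
 *	count = nf_conncount_lookup(net, head, tuple, zone, &addit);
 *	if (addit) {
 *		if (!nf_conncount_add(head, tuple, zone))
 *			return 0;	// allocation failure -> hotdrop
 *		count++;
 *	}
 */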

static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		rb_erase(&rbconn->node, root);
		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

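/* Walk the rbtree of this hash slot looking for 'key'.  On a match the
 * node's list is recounted via nf_conncount_lookup() (and 'tuple' added
 * when not already present).  While descending, up to
 * CONNCOUNT_GC_MAX_NODES nodes whose lists turn out empty are collected
 * and freed, after which the walk restarts at most once.  If no node
 * matches and 'tuple' is non-NULL, a fresh node is inserted for 'key'.
 */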
static unsigned int
count_tree(struct net *net, struct rb_root *root,
	   const u32 *key, u8 keylen,
	   const struct nf_conntrack_tuple *tuple,
	   const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct nf_conncount_rb *rbconn;
	struct nf_conncount_tuple *conn;
	unsigned int gc_count;
	bool no_gc = false;

 restart:
	gc_count = 0;
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		bool addit;

		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* same source network -> be counted! */
			unsigned int count;

			count = nf_conncount_lookup(net, &rbconn->hhead, tuple,
						    zone, &addit);

			tree_nodes_free(root, gc_nodes, gc_count);
			if (!addit)
				return count;

			if (!nf_conncount_add(&rbconn->hhead, tuple, zone))
				return 0; /* hotdrop */

			return count + 1;
		}

		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		/* only used for GC on hhead, retval and 'addit' ignored */
		nf_conncount_lookup(net, &rbconn->hhead, tuple, zone, &addit);
		if (hlist_empty(&rbconn->hhead))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		no_gc = true;
		tree_nodes_free(root, gc_nodes, gc_count);
		/* tree_nodes_free() before the new allocation permits the
		 * allocator to re-use the newly freed objects.
		 *
		 * This is a rare event; in most cases we will find an
		 * existing node to re-use (or gc_count is 0).
		 */
		goto restart;
	}

	if (!tuple)
		return 0;

	/* no match, need to insert new node */
	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		return 0;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(conncount_rb_cachep, rbconn);
		return 0;
	}

	conn->tuple = *tuple;
	conn->zone = *zone;
	memcpy(rbconn->key, key, sizeof(u32) * keylen);

	INIT_HLIST_HEAD(&rbconn->hhead);
	hlist_add_head(&conn->node, &rbconn->hhead);

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
	return 1;
}

/* Count and return the number of conntrack entries in 'net' that match
 * 'key'.  If 'tuple' is not NULL, insert it into the accounting data
 * structure.
 */
unsigned int nf_conncount_count(struct net *net,
				struct nf_conncount_data *data,
				const u32 *key,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	int count;
	u32 hash;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);

	count = count_tree(net, root, key, data->keylen, tuple, zone);

	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);

	return count;
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
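
/* A hypothetical caller, loosely modelled on xt_connlimit (which this code
 * was split from): build a zero-padded key from the connection's source
 * address and compare the count against a policy limit.  'saddr', 'limit',
 * 'tuple' and 'zone' are stand-ins for the caller's own data:
 *
 *	u32 key[MAX_KEYLEN];
 *	unsigned int count;
 *
 *	memset(key, 0, sizeof(key));
 *	key[0] = (__force u32)saddr->ip;	// IPv4 source address
 *	count = nf_conncount_count(net, data, key, tuple, zone);
 *	if (count > limit)
 *		return false;			// over limit -> reject
 */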

struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
					    unsigned int keylen)
{
	struct nf_conncount_data *data;
	int ret, i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = nf_ct_netns_get(net, family);
	if (ret < 0) {
		kfree(data);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);
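
/* Note the units: 'keylen' is passed in bytes but stored in u32 words.
 * It must be a non-zero multiple of sizeof(u32) and at most MAX_KEYLEN
 * words (20 bytes).  For example, a bare IPv4 address key is keylen 4
 * (one word); a full IPv6 address is keylen 16 (four words).
 */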

void nf_conncount_cache_free(struct hlist_head *hhead)
{
	struct nf_conncount_tuple *conn;
	struct hlist_node *n;

	hlist_for_each_entry_safe(conn, n, hhead, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->hhead);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

void nf_conncount_destroy(struct net *net, unsigned int family,
			  struct nf_conncount_data *data)
{
	unsigned int i;

	nf_ct_netns_put(net, family);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
	int i;

	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
					   sizeof(struct nf_conncount_tuple),
					   0, 0, NULL);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
					   sizeof(struct nf_conncount_rb),
					   0, 0, NULL);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");