/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain long-living
 *  information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet as encoded
 *  in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, the identifier of IP packets is
 *  generated to be unpredictable using this code only for packets subjected
 *  (actually or potentially) to defragmentation.  That is, DF packets smaller
 *  than the PMTU use a constant ID and do not use this code (see
 *  ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when the reference counter goes to 0.
 *  Once that happens, the node may be removed when a sufficient amount of
 *  time has passed since its last use.  The less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock held
 *      AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "inet_peer_unused_lock".
 *  4.  The global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 *		unused: unused node list lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent the node from disappearing
 *		dtime: unused node list lock
 *		v4daddr: unchangeable
 *		ip_id_count: idlock
 */
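
/*
 * Usage sketch (illustrative only; the real callers live in
 * net/ipv4/route.c, and the exact signature of the inet_getid() helper
 * in include/net/inetpeer.h is assumed here):
 *
 *	struct inet_peer *peer = inet_getpeer(iph->daddr, 1);
 *	if (peer != NULL) {
 *		iph->id = htons(inet_getid(peer, 0));
 *		inet_putpeer(peer);
 *	}
 *
 * Real callers typically cache the reference in the route cache entry
 * instead of dropping it right away.
 */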

/* Exported for inet_getid inline function.  */
DEFINE_SPINLOCK(inet_peer_idlock);

static struct kmem_cache *peer_cachep __read_mostly;

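/* Sentinel used in place of NULL for empty children: it points to itself
 * and has height 0, so node_height() can be applied to any child pointer
 * without a NULL check. */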
#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	.avl_left	= &peer_fake_node,
	.avl_right	= &peer_fake_node,
	.avl_height	= 0
};
#define peer_avl_empty (&peer_fake_node)
static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

static int peer_total;
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

static LIST_HEAD(unused_peers);
static DEFINE_SPINLOCK(inet_peer_unused_lock);

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);


/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
			NULL);

	/* All the timers started at system startup tend
	   to synchronize.  Perturb it a bit.
	 */
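	/* The expression below picks the first expiry uniformly in
	 * [inet_peer_gc_maxtime, 2 * inet_peer_gc_maxtime) jiffies from now. */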
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	list_del_init(&p->unused);
	spin_unlock_bh(&inet_peer_unused_lock);
}

/*
 * Called with local BH disabled and the pool lock held.
 * _stack is known to be NULL or not at compile time,
 * so the compiler will optimize away the if (_stack) tests.
 */
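/* When _stack is non-NULL, lookup() records a pointer to every child slot
 * it traverses (starting with &peer_root) and leaves stackptr one past the
 * last recorded slot.  link_to_pool() and the delete path in
 * unlink_from_pool() replay this path to rebalance the tree. */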
#define lookup(_daddr,_stack) 					\
({								\
	struct inet_peer *u, **v;				\
	if (_stack != NULL) {					\
		stackptr = _stack;				\
		*stackptr++ = &peer_root;			\
	}							\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (_daddr == u->v4daddr)			\
			break;					\
		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		if (_stack != NULL)				\
			*stackptr++ = v;			\
		u = *v;						\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool write lock held. */
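/* Finds the in-order predecessor of *start: the rightmost node of its left
 * subtree.  The traversed slots are appended to the stack via stackptr for
 * the rebalance that follows the removal. */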
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool write lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.  */
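/* Walks the recorded lookup path bottom-up.  At each node the AVL invariant
 * (children's heights differ by at most 1) is restored with a single or a
 * double rotation, and the cached heights are updated. */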
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool write lock held. */
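/* lookup() left stackptr one past the empty slot where the new node must be
 * linked; store the node there and rebalance along the recorded path. */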
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while(0)

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent the node from suddenly disappearing.
	 * If the reference count is still 1 then the node is referenced only
	 * as `p' here and from the pool.  So under the exclusive pool lock
	 * it's safe to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr, stack) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			BUG_ON(*stackptr[-1] != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p = NULL;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	if (!list_empty(&unused_peers)) {
		__u32 delta;

		p = list_first_entry(&unused_peers, struct inet_peer, unused);
		delta = (__u32)jiffies - p->dtime;

		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}

		list_del_init(&p->unused);

		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of the entry limits in the route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}

/* Called with or without local BH being disabled. */
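/* Find the entry for daddr and take a reference on it.  If no entry exists
 * and @create is non-zero, allocate and insert a fresh one; returns NULL
 * when the entry is absent and must not be created, or when the allocation
 * fails. */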
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr, NULL);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found. */
		/* Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	atomic_set(&n->rid, 0);
	n->ip_id_count = secure_ip_id(daddr);
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr, stack);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	INIT_LIST_HEAD(&n->unused);
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one less-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from the unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl;

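	/* Scale the permitted idle time linearly from inet_peer_maxttl for an
	 * empty pool down to inet_peer_minttl when the pool reaches the
	 * threshold.  The division by HZ before the multiplication by
	 * peer_total keeps the intermediate product small. */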
	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}

	/* Re-arm the timer after an interval between inet_peer_gc_mintime and
	 * inet_peer_gc_maxtime, depending on the total number of entries
	 * (more entries, shorter interval). */
	if (peer_total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

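/* Drop a reference.  The final put moves the node to the tail of the unused
 * list and records the deletion time consulted by the garbage collector;
 * the node itself is freed later by cleanup_once()/unlink_from_pool(). */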
void inet_putpeer(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (atomic_dec_and_test(&p->refcnt)) {
		list_add_tail(&p->unused, &unused_peers);
		p->dtime = (__u32)jiffies;
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}