xref: /openbmc/linux/net/ipv4/inetpeer.c (revision b595076a)
1 /*
2  *		INETPEER - A storage for permanent information about peers
3  *
4  *  This source is covered by the GNU GPL, the same as all kernel sources.
5  *
6  *  Authors:	Andrey V. Savochkin <saw@msu.ru>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/interrupt.h>
13 #include <linux/spinlock.h>
14 #include <linux/random.h>
15 #include <linux/timer.h>
16 #include <linux/time.h>
17 #include <linux/kernel.h>
18 #include <linux/mm.h>
19 #include <linux/net.h>
20 #include <net/ip.h>
21 #include <net/inetpeer.h>
22 
23 /*
24  *  Theory of operations.
25  *  We keep one entry for each peer IP address.  Each node contains long-lived
26  *  information about the peer which doesn't depend on routes.
27  *  At this moment this information consists only of the ID field for the next
28  *  outgoing IP packet.  This field is incremented with each packet as encoded
29  *  in the inet_getid() function (include/net/inetpeer.h).
30  *  At the time of writing these notes, the identifier of IP packets is made
31  *  unpredictable using this code only for packets subjected (actually or
32  *  potentially) to defragmentation.  I.e. DF packets smaller than the PMTU
33  *  use a constant ID and do not use this code (see
34  *  ip_select_ident() in include/net/ip.h).
35  *
36  *  Route cache entries hold references to our nodes.
37  *  New cache entries get references via lookup by destination IP address in
38  *  the avl tree.  The reference is grabbed only when it's needed i.e. only
39  *  when we try to output IP packet which needs an unpredictable ID (see
40  *  __ip_select_ident() in net/ipv4/route.c).
41  *  Nodes are removed only when their reference counter goes to 0.
42  *  When that happens, the node may be removed once a sufficient amount of
43  *  time has passed since its last use.  The least-recently-used entry can
44  *  also be removed if the pool is overloaded, i.e. if the total number of
45  *  entries is greater than or equal to the threshold.
46  *
47  *  The node pool is organised as an AVL tree.
48  *  Such an implementation has been chosen not just for fun.  It's a way to
49  *  prevent easy and efficient DoS attacks based on creating hash collisions.  A
50  *  huge number of long-lived nodes in a single hash slot would significantly
51  *  delay lookups performed with BHs disabled.
52  *
53  *  Serialisation issues.
54  *  1.  Nodes may appear in the tree only with the pool lock held.
55  *  2.  Nodes may disappear from the tree only with the pool lock held
56  *      AND their reference count being 0.
57  *  3.  Nodes appear on and disappear from the unused node list only under
58  *      the unused node list lock (unused_peers.lock).
59  *  4.  Global variable peer_total is modified under the pool lock.
60  *  5.  struct inet_peer fields modification:
61  *		avl_left, avl_right, avl_parent, avl_height: pool lock
62  *		unused: unused node list lock
63  *		refcnt: modified atomically with respect to other CPUs;
64  *		   usually under some other lock to prevent the node from disappearing
65  *		dtime: unused node list lock
66  *		v4daddr: unchangeable
67  *		ip_id_count: atomic value (no lock needed)
68  */
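/* Illustrative sketch (not part of this file): the per-peer ID counter
 * described above is consumed by inet_getid() in include/net/inetpeer.h.
 * A minimal userspace model of that atomic post-increment, using
 * hypothetical names, looks like this:
 */
#include <stdatomic.h>
#include <stdint.h>

struct example_peer {
	atomic_uint ip_id_count;	/* stand-in for inet_peer->ip_id_count */
};

/* Hand out the next IP ID for this peer: return the current value and
 * advance the counter atomically, so concurrent senders never reuse an ID. */
static uint16_t example_getid(struct example_peer *p)
{
	return (uint16_t)atomic_fetch_add(&p->ip_id_count, 1);
}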
69 
70 static struct kmem_cache *peer_cachep __read_mostly;
71 
72 #define node_height(x) x->avl_height
73 
74 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
75 #define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
76 static const struct inet_peer peer_fake_node = {
77 	.avl_left	= peer_avl_empty_rcu,
78 	.avl_right	= peer_avl_empty_rcu,
79 	.avl_height	= 0
80 };
81 
82 static struct {
83 	struct inet_peer __rcu *root;
84 	spinlock_t	lock;
85 	int		total;
86 } peers = {
87 	.root		= peer_avl_empty_rcu,
88 	.lock		= __SPIN_LOCK_UNLOCKED(peers.lock),
89 	.total		= 0,
90 };
91 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
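/* Why a 40-slot stack is "sufficient for about 2^27 nodes": the sparsest
 * AVL tree of height h has N(h) = N(h-1) + N(h-2) + 1 nodes, and N(40)
 * already exceeds 2^27, so a pool of about 2^27 entries can never be
 * deeper than 39 levels.  Standalone check of that recurrence
 * (illustrative only, not part of this file):
 */
#include <stdio.h>

int main(void)
{
	unsigned long long n_prev = 1, n = 2;	/* N(1) = 1, N(2) = 2 */
	int h;

	for (h = 3; h <= 40; h++) {
		unsigned long long next = n + n_prev + 1;

		n_prev = n;
		n = next;
	}
	/* Prints N(40) = 267914295, comfortably above 2^27 = 134217728. */
	printf("N(40) = %llu, 2^27 = %llu\n", n, 1ULL << 27);
	return 0;
}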
92 
93 /* Exported for sysctl_net_ipv4.  */
94 int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
95 					 * aggressively at this stage */
96 int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
97 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
98 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
99 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
100 
101 static struct {
102 	struct list_head	list;
103 	spinlock_t		lock;
104 } unused_peers = {
105 	.list			= LIST_HEAD_INIT(unused_peers.list),
106 	.lock			= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
107 };
108 
109 static void peer_check_expire(unsigned long dummy);
110 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
111 
112 
113 /* Called from ip_output.c:ip_init  */
114 void __init inet_initpeers(void)
115 {
116 	struct sysinfo si;
117 
118 	/* Use the straight interface to information about memory. */
119 	si_meminfo(&si);
120 	/* The values below were suggested by Alexey Kuznetsov
121 	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
122 	 * myself.  --SAW
123 	 */
124 	if (si.totalram <= (32768*1024)/PAGE_SIZE)
125 		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
126 	if (si.totalram <= (16384*1024)/PAGE_SIZE)
127 		inet_peer_threshold >>= 1; /* about 512KB */
128 	if (si.totalram <= (8192*1024)/PAGE_SIZE)
129 		inet_peer_threshold >>= 2; /* about 128KB */
130 
131 	peer_cachep = kmem_cache_create("inet_peer_cache",
132 			sizeof(struct inet_peer),
133 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
134 			NULL);
135 
136 	/* All the timers started at system startup tend
137 	   to synchronize. Perturb this one a bit.
138 	 */
139 	peer_periodic_timer.expires = jiffies
140 		+ net_random() % inet_peer_gc_maxtime
141 		+ inet_peer_gc_maxtime;
142 	add_timer(&peer_periodic_timer);
143 }
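/* The three "si.totalram" tests above are cumulative rather than mutually
 * exclusive, so a very small machine takes every reduction.  Illustrative
 * sketch of the same arithmetic (hypothetical helper, not part of this
 * file):
 *	>  32 MB: 65664  (65536 + 128)
 *	<= 32 MB: 65664 >> 1 = 32832
 *	<= 16 MB: 32832 >> 1 = 16416
 *	<=  8 MB: 16416 >> 2 =  4104
 */
static int example_scale_threshold(unsigned long long totalram_bytes)
{
	int threshold = 65536 + 128;

	if (totalram_bytes <= 32768ULL * 1024)
		threshold >>= 1;	/* max pool size about 1MB on IA32 */
	if (totalram_bytes <= 16384ULL * 1024)
		threshold >>= 1;	/* about 512KB */
	if (totalram_bytes <= 8192ULL * 1024)
		threshold >>= 2;	/* about 128KB */
	return threshold;
}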
144 
145 /* Called with or without local BH being disabled. */
146 static void unlink_from_unused(struct inet_peer *p)
147 {
148 	if (!list_empty(&p->unused)) {
149 		spin_lock_bh(&unused_peers.lock);
150 		list_del_init(&p->unused);
151 		spin_unlock_bh(&unused_peers.lock);
152 	}
153 }
154 
155 /*
156  * Called with local BH disabled and the pool lock held.
157  */
158 #define lookup(_daddr, _stack) 					\
159 ({								\
160 	struct inet_peer *u;					\
161 	struct inet_peer __rcu **v;				\
162 								\
163 	stackptr = _stack;					\
164 	*stackptr++ = &peers.root;				\
165 	for (u = rcu_dereference_protected(peers.root,		\
166 			lockdep_is_held(&peers.lock));		\
167 	     u != peer_avl_empty; ) {				\
168 		if (_daddr == u->v4daddr)			\
169 			break;					\
170 		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
171 			v = &u->avl_left;			\
172 		else						\
173 			v = &u->avl_right;			\
174 		*stackptr++ = v;				\
175 		u = rcu_dereference_protected(*v,		\
176 			lockdep_is_held(&peers.lock));		\
177 	}							\
178 	u;							\
179 })
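/* A plain-function sketch of what the lookup() macro above does (all names
 * hypothetical, not part of this file): walk the tree by address and record
 * the location of every child slot followed, so the caller can later splice
 * a new node into the last slot and rebalance along the remembered path
 * without needing parent pointers.  The real code is a macro so that
 * "stackptr" is left positioned for link_to_pool() and peer_avl_rebalance().
 */
struct example_node {
	unsigned int key;
	struct example_node *left, *right;
};

static struct example_node *example_lookup(struct example_node **root,
					   unsigned int key,
					   struct example_node ***stack)
{
	struct example_node ***stackptr = stack;
	struct example_node **slot = root;
	struct example_node *n;

	*stackptr++ = slot;			/* remember the root slot */
	for (n = *slot; n != NULL; n = *slot) {
		if (key == n->key)
			break;			/* exact match */
		slot = (key < n->key) ? &n->left : &n->right;
		*stackptr++ = slot;		/* remember the slot we followed */
	}
	return n;				/* NULL if not found */
}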
180 
181 /*
182  * Called with rcu_read_lock_bh()
183  * Because we hold no lock against a writer, it's quite possible we fall
184  * into an endless loop.
185  * But every pointer we follow is guaranteed to be valid thanks to RCU.
186  * We exit from this function if the number of links followed exceeds PEER_MAXDEPTH.
187  */
188 static struct inet_peer *lookup_rcu_bh(__be32 daddr)
189 {
190 	struct inet_peer *u = rcu_dereference_bh(peers.root);
191 	int count = 0;
192 
193 	while (u != peer_avl_empty) {
194 		if (daddr == u->v4daddr) {
195 			/* Before taking a reference, check if this entry was
196 			 * deleted, unlink_from_pool() sets refcnt=-1 to make
197 			 * distinction between an unused entry (refcnt=0) and
198 			 * a freed one.
199 			 */
200 			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
201 				u = NULL;
202 			return u;
203 		}
204 		if ((__force __u32)daddr < (__force __u32)u->v4daddr)
205 			u = rcu_dereference_bh(u->avl_left);
206 		else
207 			u = rcu_dereference_bh(u->avl_right);
208 		if (unlikely(++count == PEER_MAXDEPTH))
209 			break;
210 	}
211 	return NULL;
212 }
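/* Userspace model (illustrative only) of the "take a reference unless the
 * entry is already dead" step above: atomic_add_unless(&refcnt, 1, -1)
 * increments the counter unless it currently holds the deleted marker -1,
 * and reports whether the increment happened.  All names below are
 * hypothetical, not kernel APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool example_add_unless(atomic_int *v, int add, int unless)
{
	int old = atomic_load(v);

	while (old != unless) {
		/* On failure, "old" is refreshed with the current value. */
		if (atomic_compare_exchange_weak(v, &old, old + add))
			return true;	/* reference taken */
	}
	return false;			/* entry was concurrently deleted */
}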
213 
214 /* Called with local BH disabled and the pool lock held. */
215 #define lookup_rightempty(start)				\
216 ({								\
217 	struct inet_peer *u;					\
218 	struct inet_peer __rcu **v;				\
219 	*stackptr++ = &start->avl_left;				\
220 	v = &start->avl_left;					\
221 	for (u = rcu_dereference_protected(*v,			\
222 			lockdep_is_held(&peers.lock));		\
223 	     u->avl_right != peer_avl_empty_rcu; ) {		\
224 		v = &u->avl_right;				\
225 		*stackptr++ = v;				\
226 		u = rcu_dereference_protected(*v,		\
227 			lockdep_is_held(&peers.lock));		\
228 	}							\
229 	u;							\
230 })
231 
232 /* Called with local BH disabled and the pool lock held.
233  * The variable names serve as the proof of the operation's correctness.
234  * Look into mm/map_avl.c for a more detailed description of the ideas.
235  */
236 static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
237 		struct inet_peer __rcu ***stackend)
238 {
239 	struct inet_peer __rcu **nodep;
240 	struct inet_peer *node, *l, *r;
241 	int lh, rh;
242 
243 	while (stackend > stack) {
244 		nodep = *--stackend;
245 		node = rcu_dereference_protected(*nodep,
246 				lockdep_is_held(&peers.lock));
247 		l = rcu_dereference_protected(node->avl_left,
248 				lockdep_is_held(&peers.lock));
249 		r = rcu_dereference_protected(node->avl_right,
250 				lockdep_is_held(&peers.lock));
251 		lh = node_height(l);
252 		rh = node_height(r);
253 		if (lh > rh + 1) { /* l: RH+2 */
254 			struct inet_peer *ll, *lr, *lrl, *lrr;
255 			int lrh;
256 			ll = rcu_dereference_protected(l->avl_left,
257 				lockdep_is_held(&peers.lock));
258 			lr = rcu_dereference_protected(l->avl_right,
259 				lockdep_is_held(&peers.lock));
260 			lrh = node_height(lr);
261 			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
262 				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
263 				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
264 				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
265 				RCU_INIT_POINTER(l->avl_left, ll);       /* ll: RH+1 */
266 				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
267 				l->avl_height = node->avl_height + 1;
268 				RCU_INIT_POINTER(*nodep, l);
269 			} else { /* ll: RH, lr: RH+1 */
270 				lrl = rcu_dereference_protected(lr->avl_left,
271 					lockdep_is_held(&peers.lock));	/* lrl: RH or RH-1 */
272 				lrr = rcu_dereference_protected(lr->avl_right,
273 					lockdep_is_held(&peers.lock));	/* lrr: RH or RH-1 */
274 				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
275 				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
276 				node->avl_height = rh + 1; /* node: RH+1 */
277 				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
278 				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
279 				l->avl_height = rh + 1;	/* l: RH+1 */
280 				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
281 				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
282 				lr->avl_height = rh + 2;
283 				RCU_INIT_POINTER(*nodep, lr);
284 			}
285 		} else if (rh > lh + 1) { /* r: LH+2 */
286 			struct inet_peer *rr, *rl, *rlr, *rll;
287 			int rlh;
288 			rr = rcu_dereference_protected(r->avl_right,
289 				lockdep_is_held(&peers.lock));
290 			rl = rcu_dereference_protected(r->avl_left,
291 				lockdep_is_held(&peers.lock));
292 			rlh = node_height(rl);
293 			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
294 				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
295 				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
296 				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
297 				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
298 				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
299 				r->avl_height = node->avl_height + 1;
300 				RCU_INIT_POINTER(*nodep, r);
301 			} else { /* rr: LH, rl: LH+1 */
302 				rlr = rcu_dereference_protected(rl->avl_right,
303 					lockdep_is_held(&peers.lock));	/* rlr: LH or LH-1 */
304 				rll = rcu_dereference_protected(rl->avl_left,
305 					lockdep_is_held(&peers.lock));	/* rll: LH or LH-1 */
306 				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
307 				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
308 				node->avl_height = lh + 1; /* node: LH+1 */
309 				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
310 				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
311 				r->avl_height = lh + 1;	/* r: LH+1 */
312 				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
313 				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
314 				rl->avl_height = lh + 2;
315 				RCU_INIT_POINTER(*nodep, rl);
316 			}
317 		} else {
318 			node->avl_height = (lh > rh ? lh : rh) + 1;
319 		}
320 	}
321 }
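/* Stripped of the RCU annotations and height bookkeeping, the first branch
 * above (lh > rh + 1 with lrh <= node_height(ll)) is the classic single
 * right rotation.  Sketch using the example_node type from the lookup
 * sketch earlier (illustrative only, not part of this file):
 *
 *          node                 l
 *          /  \                / \
 *         l    r     ==>     ll   node
 *        / \                      /  \
 *      ll   lr                  lr    r
 */
static struct example_node *example_rotate_right(struct example_node *node)
{
	struct example_node *l = node->left;

	node->left = l->right;		/* lr becomes node's left child */
	l->right = node;		/* node moves under l */
	return l;			/* l takes node's old place (*nodep) */
}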
322 
323 /* Called with local BH disabled and the pool lock held. */
324 #define link_to_pool(n)						\
325 do {								\
326 	n->avl_height = 1;					\
327 	n->avl_left = peer_avl_empty_rcu;			\
328 	n->avl_right = peer_avl_empty_rcu;			\
329 	/* lockless readers can catch us now */			\
330 	rcu_assign_pointer(**--stackptr, n);			\
331 	peer_avl_rebalance(stack, stackptr);			\
332 } while (0)
333 
334 static void inetpeer_free_rcu(struct rcu_head *head)
335 {
336 	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
337 }
338 
339 /* May be called with local BH enabled. */
340 static void unlink_from_pool(struct inet_peer *p)
341 {
342 	int do_free;
343 
344 	do_free = 0;
345 
346 	spin_lock_bh(&peers.lock);
347 	/* Check the reference counter.  It was artificially incremented by 1
348 	 * in cleanup_once() to prevent the node from suddenly disappearing.
349 	 * If we can atomically (because of lockless readers) take this last
350 	 * reference, it's safe to remove the node and free it later.
351 	 * We use refcnt=-1 to alert lockless readers that this entry is deleted.
352 	 */
353 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
354 		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
355 		struct inet_peer __rcu ***stackptr, ***delp;
356 		if (lookup(p->v4daddr, stack) != p)
357 			BUG();
358 		delp = stackptr - 1; /* *delp[0] == p */
359 		if (p->avl_left == peer_avl_empty_rcu) {
360 			*delp[0] = p->avl_right;
361 			--stackptr;
362 		} else {
363 			/* look for a node to insert instead of p */
364 			struct inet_peer *t;
365 			t = lookup_rightempty(p);
366 			BUG_ON(rcu_dereference_protected(*stackptr[-1],
367 					lockdep_is_held(&peers.lock)) != t);
368 			**--stackptr = t->avl_left;
369 			/* t is removed, t->v4daddr > x->v4daddr for any
370 			 * x in p->avl_left subtree.
371 			 * Put t in the old place of p. */
372 			RCU_INIT_POINTER(*delp[0], t);
373 			t->avl_left = p->avl_left;
374 			t->avl_right = p->avl_right;
375 			t->avl_height = p->avl_height;
376 			BUG_ON(delp[1] != &p->avl_left);
377 			delp[1] = &t->avl_left; /* was &p->avl_left */
378 		}
379 		peer_avl_rebalance(stack, stackptr);
380 		peers.total--;
381 		do_free = 1;
382 	}
383 	spin_unlock_bh(&peers.lock);
384 
385 	if (do_free)
386 		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
387 	else
388 		/* The node is used again.  Decrease the reference counter
389 		 * back.  The loop "cleanup -> unlink_from_unused
390 		 *   -> unlink_from_pool -> putpeer -> link_to_unused
391 		 *   -> cleanup (for the same node)"
392 		 * doesn't really exist because the entry will have a
393 		 * recent deletion time and will not be cleaned again soon.
394 		 */
395 		inet_putpeer(p);
396 }
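/* Userspace model (illustrative only, hypothetical names) of the deletion
 * handshake above: only the caller that atomically moves refcnt from 1
 * (nothing left but cleanup_once()'s own artificial reference) to the
 * deleted marker -1 may unlink and free the node; any lockless lookup that
 * bumped refcnt in the meantime makes the cmpxchg fail and the node
 * survives.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool example_try_mark_dead(atomic_int *refcnt)
{
	int expected = 1;	/* only our artificial reference remains */

	return atomic_compare_exchange_strong(refcnt, &expected, -1);
}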
397 
398 /* May be called with local BH enabled. */
399 static int cleanup_once(unsigned long ttl)
400 {
401 	struct inet_peer *p = NULL;
402 
403 	/* Remove the first entry from the list of unused nodes. */
404 	spin_lock_bh(&unused_peers.lock);
405 	if (!list_empty(&unused_peers.list)) {
406 		__u32 delta;
407 
408 		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
409 		delta = (__u32)jiffies - p->dtime;
410 
411 		if (delta < ttl) {
412 			/* Do not prune fresh entries. */
413 			spin_unlock_bh(&unused_peers.lock);
414 			return -1;
415 		}
416 
417 		list_del_init(&p->unused);
418 
419 		/* Grab an extra reference to prevent the node from disappearing
420 		 * before the unlink_from_pool() call. */
421 		atomic_inc(&p->refcnt);
422 	}
423 	spin_unlock_bh(&unused_peers.lock);
424 
425 	if (p == NULL)
426 		/* It means that the total number of USED entries has
427 		 * grown over inet_peer_threshold.  It shouldn't really
428 		 * happen because of the entry limits in the route cache. */
429 		return -1;
430 
431 	unlink_from_pool(p);
432 	return 0;
433 }
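/* The age test above relies on unsigned wrap-around: "(__u32)jiffies -
 * p->dtime" yields the elapsed ticks even if jiffies overflowed after the
 * node was stamped.  Standalone illustration (hypothetical helper, not
 * part of this file):
 */
#include <stdint.h>

static uint32_t example_age_in_ticks(uint32_t now, uint32_t dtime)
{
	/* Unsigned subtraction is well defined modulo 2^32, so e.g.
	 * now = 0x00000100 and dtime = 0xffffff00 (stamped just before the
	 * wrap) still gives the true age of 0x200 ticks. */
	return now - dtime;
}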
434 
435 /* Called with or without local BH being disabled. */
436 struct inet_peer *inet_getpeer(__be32 daddr, int create)
437 {
438 	struct inet_peer *p;
439 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
440 
441 	/* Look up the address quickly and locklessly.
442 	 * Because of a concurrent writer, we might not find an existing entry.
443 	 */
444 	rcu_read_lock_bh();
445 	p = lookup_rcu_bh(daddr);
446 	rcu_read_unlock_bh();
447 
448 	if (p) {
449 		/* The existing node has been found.
450 		 * Remove the entry from unused list if it was there.
451 		 */
452 		unlink_from_unused(p);
453 		return p;
454 	}
455 
456 	/* Retry an exact lookup, this time taking the lock first.
457 	 * At least the nodes should be hot in our cache.
458 	 */
459 	spin_lock_bh(&peers.lock);
460 	p = lookup(daddr, stack);
461 	if (p != peer_avl_empty) {
462 		atomic_inc(&p->refcnt);
463 		spin_unlock_bh(&peers.lock);
464 		/* Remove the entry from unused list if it was there. */
465 		unlink_from_unused(p);
466 		return p;
467 	}
468 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
469 	if (p) {
470 		p->v4daddr = daddr;
471 		atomic_set(&p->refcnt, 1);
472 		atomic_set(&p->rid, 0);
473 		atomic_set(&p->ip_id_count, secure_ip_id(daddr));
474 		p->tcp_ts_stamp = 0;
475 		INIT_LIST_HEAD(&p->unused);
476 
477 
478 		/* Link the node. */
479 		link_to_pool(p);
480 		peers.total++;
481 	}
482 	spin_unlock_bh(&peers.lock);
483 
484 	if (peers.total >= inet_peer_threshold)
485 		/* Remove one less-recently-used entry. */
486 		cleanup_once(0);
487 
488 	return p;
489 }
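/* A minimal sketch (not part of this file; example_use_peer() is
 * hypothetical) of how callers are expected to use the two exported entry
 * points: look the peer up on demand, use its per-destination state, and
 * drop the reference when it is no longer needed.  The real caller is the
 * route cache via __ip_select_ident() in net/ipv4/route.c.
 */
static void example_use_peer(__be32 daddr)
{
	struct inet_peer *peer;

	peer = inet_getpeer(daddr, 1);	/* create the entry if it is missing */
	if (!peer)
		return;			/* allocation failed (or create == 0) */

	/* ... consume peer->ip_id_count, peer->rid, peer->tcp_ts_stamp ... */

	inet_putpeer(peer);		/* may park the node on the unused list */
}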
490 
491 /* Called with local BH disabled. */
492 static void peer_check_expire(unsigned long dummy)
493 {
494 	unsigned long now = jiffies;
495 	int ttl;
496 
497 	if (peers.total >= inet_peer_threshold)
498 		ttl = inet_peer_minttl;
499 	else
500 		ttl = inet_peer_maxttl
501 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
502 					peers.total / inet_peer_threshold * HZ;
503 	while (!cleanup_once(ttl)) {
504 		if (jiffies != now)
505 			break;
506 	}
507 
508 	/* Trigger the timer again after an interval between inet_peer_gc_mintime
509 	 * and inet_peer_gc_maxtime, depending on the total number of entries
510 	 * (more entries, shorter interval). */
511 	if (peers.total >= inet_peer_threshold)
512 		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
513 	else
514 		peer_periodic_timer.expires = jiffies
515 			+ inet_peer_gc_maxtime
516 			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
517 				peers.total / inet_peer_threshold * HZ;
518 	add_timer(&peer_periodic_timer);
519 }
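/* The ttl above interpolates linearly between inet_peer_maxttl (empty pool)
 * and inet_peer_minttl (pool at the threshold); dividing by HZ before
 * multiplying helps keep the intermediate product within an int.
 * Hypothetical standalone sketch of the same formula: a pool that is half
 * full gets a ttl halfway between the two bounds (360 s for the default
 * 120 s / 600 s settings).
 */
static int example_interp_ttl(int minttl, int maxttl, int total,
			      int threshold, int hz)
{
	if (total >= threshold)
		return minttl;
	return maxttl - (maxttl - minttl) / hz * total / threshold * hz;
}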
520 
521 void inet_putpeer(struct inet_peer *p)
522 {
523 	local_bh_disable();
524 
525 	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
526 		list_add_tail(&p->unused, &unused_peers.list);
527 		p->dtime = (__u32)jiffies;
528 		spin_unlock(&unused_peers.lock);
529 	}
530 
531 	local_bh_enable();
532 }
533
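/* Userspace model (illustrative only, hypothetical names) of the
 * atomic_dec_and_lock() contract used above: the reference count is
 * dropped, and only the caller that takes it to zero gets "true" back
 * with the lock already held, so exactly one putter parks the node on the
 * unused list and timestamps it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

static bool example_dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: while the count stays above 1 we can decrement
	 * without touching the lock at all. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return false;
	}

	/* Slow path: we may be the last holder; decide under the lock. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return true;		/* count hit zero, lock stays held */
	pthread_mutex_unlock(lock);
	return false;
}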