/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Version:	$Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-living
 *  information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet, as encoded
 *  in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, the IP packet identifier is generated
 *  to be unpredictable using this code only for packets subjected
 *  (actually or potentially) to defragmentation.  I.e. DF packets smaller
 *  than the PMTU use a constant ID and do not use this code (see
 *  ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  A less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  amount of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock held
 *      AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "inet_peer_unused_lock".
 *  4.  The global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer fields modification:
 *	avl_left, avl_right, avl_parent, avl_height: pool lock
 *	unused: unused node list lock
 *	refcnt: atomically against modifications on other CPUs;
 *	   usually under some other lock to prevent node disappearing
 *	dtime: unused node list lock
 *	v4daddr: unchangeable
 *	ip_id_count: idlock
 */
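/* The ID allocation described above is performed by the inet_getid() inline
 * in include/net/inetpeer.h.  A sketch of that helper, close to (but not
 * verbatim) the real header, showing how ip_id_count is consumed under
 * inet_peer_idlock; "more" lets a caller reserve 1 + more consecutive IDs
 * (cf. ip_select_ident_more()):
 *
 *	static inline __u16 inet_getid(struct inet_peer *p, int more)
 *	{
 *		__u16 id;
 *
 *		spin_lock_bh(&inet_peer_idlock);
 *		id = p->ip_id_count;
 *		p->ip_id_count += 1 + more;
 *		spin_unlock_bh(&inet_peer_idlock);
 *		return id;
 *	}
 */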
/* Exported for the inet_getid() inline function.  */
DEFINE_SPINLOCK(inet_peer_idlock);

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	.avl_left	= &peer_fake_node,
	.avl_right	= &peer_fake_node,
	.avl_height	= 0
};
#define peer_avl_empty (&peer_fake_node)
static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

static int peer_total;
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

static LIST_HEAD(unused_peers);
static DEFINE_SPINLOCK(inet_peer_unused_lock);

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);


/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
			NULL);

	/* All the timers, started at system startup, tend
	   to synchronize.  Perturb it a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	list_del_init(&p->unused);
	spin_unlock_bh(&inet_peer_unused_lock);
}

/*
 * Called with local BH disabled and the pool lock held.
 * _stack is known to be NULL or not at compile time,
 * so the compiler will optimize out the if (_stack) tests.
 */
#define lookup(_daddr,_stack)					\
({								\
	struct inet_peer *u, **v;				\
	if (_stack != NULL) {					\
		stackptr = _stack;				\
		*stackptr++ = &peer_root;			\
	}							\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (_daddr == u->v4daddr)			\
			break;					\
		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		if (_stack != NULL)				\
			*stackptr++ = v;			\
		u = *v;						\
	}							\
	u;							\
})
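/* Illustrative only, never compiled: how callers drive the lookup() macro.
 * example_find() is a hypothetical name, not a kernel function.  With a NULL
 * _stack the macro is a plain binary search and a read lock suffices; with a
 * real stack it also records the path from the root, which the insert and
 * delete paths below feed to peer_avl_rebalance().
 */
#if 0
static struct inet_peer *example_find(__be32 daddr)
{
	/* stackptr must be in scope even for the NULL variant: the macro
	 * names it inside its (compile-time dead) _stack != NULL branches. */
	struct inet_peer ***stackptr;
	struct inet_peer *p;

	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr, NULL);	/* search only, no path recording */
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);	/* take a ref under the lock, as
					 * inet_getpeer() does below */
	read_unlock_bh(&peer_pool_lock);

	return p != peer_avl_empty ? p : NULL;
}
#endif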
/* Called with local BH disabled and the pool write lock held. */
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool write lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.  */
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool write lock held. */
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while(0)
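/* For orientation, the two left-heavy cases handled by peer_avl_rebalance()
 * above (the right-heavy cases are exact mirror images).  Heights are
 * relative to rh == RH:
 *
 *   Single rotation, lh == RH+2 and node_height(ll) >= node_height(lr):
 *
 *	     node                   l
 *	    /    \                /   \
 *	   l      r     ==>     ll     node
 *	  / \                         /    \
 *	 ll  lr                      lr     r
 *
 *   Double rotation, node_height(lr) > node_height(ll):
 *
 *	     node                    lr
 *	    /    \                 /    \
 *	   l      r     ==>       l      node
 *	  / \                    / \    /    \
 *	 ll  lr                 ll lrl lrr    r
 *	    /  \
 *	  lrl  lrr
 */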
/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent the node from suddenly disappearing.
	 * If the reference count is still 1 then the node is referenced only
	 * as `p' here and from the pool.  So under the exclusive pool lock
	 * it's safe to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr, stack) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			BUG_ON(*stackptr[-1] != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p = NULL;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	if (!list_empty(&unused_peers)) {
		__u32 delta;

		p = list_first_entry(&unused_peers, struct inet_peer, unused);
		delta = (__u32)jiffies - p->dtime;

		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}

		list_del_init(&p->unused);

		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}
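/* Reference-count life cycle, summarizing the functions above and below
 * (descriptive only):
 *
 *   inet_getpeer()        new node: refcnt = 1; existing node: refcnt++
 *   inet_putpeer()        refcnt--; on reaching 0 the node is parked on
 *                         unused_peers with dtime = jiffies
 *   cleanup_once(ttl)     if jiffies - dtime >= ttl, take the node off the
 *                         unused list and bump refcnt 0 -> 1 as an
 *                         "undelete guard", then call unlink_from_pool()
 *   unlink_from_pool()    under the write lock: refcnt still 1 means only
 *                         the pool (plus our guard) references the node, so
 *                         unlink it from the AVL tree and free it;
 *                         otherwise someone re-acquired it meanwhile, so
 *                         just drop the guard via inet_putpeer()
 */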
/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr, NULL);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found.
		 * Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	atomic_set(&n->rid, 0);
	n->ip_id_count = secure_ip_id(daddr);
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr, stack);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	INIT_LIST_HEAD(&n->unused);
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one less-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from the unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl;

	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}

	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
	 * interval depending on the total number of entries (more entries,
	 * less interval). */
	if (peer_total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

void inet_putpeer(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (atomic_dec_and_test(&p->refcnt)) {
		list_add_tail(&p->unused, &unused_peers);
		p->dtime = (__u32)jiffies;
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}
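/* Illustrative only, never compiled: the typical life cycle driven by the
 * exported API.  The caller pattern mirrors __ip_select_ident() in
 * net/ipv4/route.c; example_use() is a hypothetical name, not a kernel
 * function.
 */
#if 0
static void example_use(__be32 daddr)
{
	struct inet_peer *peer;

	/* Take (or create) the node; on success we hold one reference. */
	peer = inet_getpeer(daddr, 1);
	if (peer == NULL)
		return;

	/* Consume one IP ID for this destination (see the inet_getid()
	 * sketch near the top of this file). */
	(void)inet_getid(peer, 0);

	/* Drop the reference; at refcnt 0 the node is parked on
	 * unused_peers with dtime = jiffies, to be reaped later by
	 * peer_check_expire()/cleanup_once() once its TTL has passed. */
	inet_putpeer(peer);
}
#endif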