/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Version:	$Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-living
 *  information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet, as encoded
 *  in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, the identifier of an IP packet is
 *  generated to be unpredictable using this code only for packets subject
 *  (actually or potentially) to defragmentation.  I.e. DF packets smaller
 *  than the PMTU use a constant ID and do not use this code (see
 *  ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference counter goes to 0.
 *  Even then, a node may only be removed once a sufficient amount of time has
 *  passed since its last use.  The least-recently-used entry can also be
 *  removed if the pool is overloaded, i.e. if the total number of entries is
 *  greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock held
 *      AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "inet_peer_unused_lock".
 *  4.  The global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer field modification:
 *	avl_left, avl_right, avl_parent, avl_height: pool lock
 *	unused_next, unused_prevp: unused node list lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent node disappearing
 *	dtime: unused node list lock
 *	v4daddr: unchangeable
 *	ip_id_count: idlock
 */

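/*
 *  For reference, a minimal sketch of the ID generation this file serves
 *  (the authoritative inline lives in include/net/inetpeer.h; its exact
 *  body is assumed here, not quoted): inet_getid() is a post-increment of
 *  ip_id_count serialised by inet_peer_idlock, roughly:
 *
 *	static inline __u16 inet_getid(struct inet_peer *p)
 *	{
 *		__u16 id;
 *
 *		spin_lock_bh(&inet_peer_idlock);
 *		id = p->ip_id_count++;
 *		spin_unlock_bh(&inet_peer_idlock);
 *		return id;
 *	}
 */
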
/* Exported for the inet_getid() inline function. */
DEFINE_SPINLOCK(inet_peer_idlock);

static kmem_cache_t *peer_cachep;

#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	.avl_left	= &peer_fake_node,
	.avl_right	= &peer_fake_node,
	.avl_height	= 0
};
#define peer_avl_empty (&peer_fake_node)
static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

static volatile int peer_total;
/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */

static struct inet_peer *inet_peer_unused_head;
/* Exported for the inet_putpeer() inline function. */
struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
DEFINE_SPINLOCK(inet_peer_unused_lock);
#define PEER_MAX_CLEANUP_WORK 30

static void peer_check_expire(unsigned long dummy);
static struct timer_list peer_periodic_timer =
	TIMER_INITIALIZER(peer_check_expire, 0, 0);

/* Exported for sysctl_net_ipv4. */
int inet_peer_gc_mintime = 10 * HZ,
    inet_peer_gc_maxtime = 120 * HZ;

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN,
			NULL, NULL);

	if (!peer_cachep)
		panic("cannot create inet_peer_cache");

	/* All the timers started at system startup tend to synchronize.
	 * Perturb this one a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (p->unused_prevp != NULL) {
		/* On the unused list. */
		*p->unused_prevp = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark it as removed */
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}

/* Called with local BH disabled and the pool lock held. */
#define lookup(daddr) 						\
({								\
	struct inet_peer *u, **v;				\
	stackptr = stack;					\
	*stackptr++ = &peer_root;				\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (daddr == u->v4daddr)			\
			break;					\
		if (daddr < u->v4daddr)				\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})

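/*
 *  Illustrative use of lookup() (a sketch drawn from the callers below,
 *  not extra functionality): the macro expects `stack' and `stackptr' to
 *  be declared in the calling scope and records the traversal path in
 *  stack[] so that peer_avl_rebalance() can later walk it back up:
 *
 *	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
 *	struct inet_peer *p;
 *
 *	read_lock_bh(&peer_pool_lock);
 *	p = lookup(daddr);
 *	if (p != peer_avl_empty)
 *		atomic_inc(&p->refcnt);
 *	read_unlock_bh(&peer_pool_lock);
 */
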
/* Called with local BH disabled and the pool write lock held. */
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool write lock held.
 * Variable names are the proof of operation correctness.
 * See mm/map_avl.c for a more detailed description of the ideas. */
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool write lock held. */
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while(0)

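/*
 *  Caller pattern for link_to_pool(), sketched from inet_getpeer() below
 *  (illustrative only): a preceding lookup() under the write lock must
 *  have missed, i.e. returned peer_avl_empty, leaving stackptr just past
 *  the empty slot where the new node n (with n->v4daddr == daddr) belongs:
 *
 *	write_lock_bh(&peer_pool_lock);
 *	if (lookup(daddr) == peer_avl_empty)
 *		link_to_pool(n);
 *	write_unlock_bh(&peer_pool_lock);
 */
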
/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent the node from suddenly disappearing.
	 * If the reference count is still 1 then the node is referenced only
	 * as `p' here and from the pool.  So under the exclusive pool lock
	 * it's safe to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			if (*stackptr[-1] != t)
				BUG();
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			if (delp[1] != &p->avl_left)
				BUG();
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup_once -> unlink_from_unused
		 *  -> unlink_from_pool -> putpeer -> link_to_unused
		 *  -> cleanup_once (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	p = inet_peer_unused_head;
	if (p != NULL) {
		if (time_after(p->dtime + ttl, jiffies)) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}
		inet_peer_unused_head = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark as not on the list */
		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}

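/*
 *  cleanup_once() in brief: with ttl == 0 the least-recently-used unused
 *  entry is reclaimed unconditionally (the pool-overload path in
 *  inet_getpeer() below); with ttl > 0 it is reclaimed only if more than
 *  ttl jiffies have passed since its dtime (the periodic timer path).
 *  A return of -1 means there is nothing (more) to reclaim, so callers
 *  can loop until it fails, as peer_check_expire() does:
 *
 *	while (i++ < PEER_MAX_CLEANUP_WORK && cleanup_once(ttl) == 0)
 *		;
 */
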
/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__u32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found.
		 * Remove it from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	n->ip_id_count = secure_ip_id(daddr);
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	n->unused_prevp = NULL; /* not on the list */
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one least-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from the unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	int i;
	int ttl;

	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		/* Interpolate the TTL linearly between inet_peer_maxttl
		 * (empty pool) and inet_peer_minttl (full pool); dividing
		 * by HZ before multiplying by peer_total keeps the
		 * intermediate product from overflowing an int. */
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
	/* Reclaim at most PEER_MAX_CLEANUP_WORK stale entries per run. */
	for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++)
		;

	/* Trigger the timer after an inet_peer_gc_mintime ..
	 * inet_peer_gc_maxtime interval, depending on the total number of
	 * entries (the more entries, the shorter the interval). */
	if (peer_total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

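/*
 *  End-to-end usage sketch (illustrative; the real consumer is
 *  __ip_select_ident() in net/ipv4/route.c, normally via the peer
 *  pointer cached in a route cache entry):
 *
 *	struct inet_peer *peer;
 *
 *	peer = inet_getpeer(iph->daddr, 1);
 *	if (peer) {
 *		iph->id = htons(inet_getid(peer));
 *		inet_putpeer(peer);
 *	}
 */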