--- inetpeer.c	(b981d8b3f5e008ff10d993be633ad00564fc22cd)
+++ inetpeer.c	(d71209ded2ba6010070d02005384897c59859d00)
 /*
  *		INETPEER - A storage for permanent information about peers
  *
  *  This source is covered by the GNU GPL, the same as all kernel sources.
  *
  *  Version:	$Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
  *
  *  Authors:	Andrey V. Savochkin <saw@msu.ru>

--- 47 unchanged lines hidden ---

  *		1.  Nodes may appear in the tree only with the pool write lock held.
  *		2.  Nodes may disappear from the tree only with the pool write lock held
  *		    AND reference count being 0.
  *		3.  Nodes appears and disappears from unused node list only under
  *		    "inet_peer_unused_lock".
  *		4.  Global variable peer_total is modified under the pool lock.
  *		5.  struct inet_peer fields modification:
  *			avl_left, avl_right, avl_parent, avl_height: pool lock
- *			unused_next, unused_prevp:	unused node list lock
+ *			unused:				unused node list lock
  *			refcnt:		atomically against modifications on other CPU;
  *					usually under some other lock to prevent node disappearing
  *			dtime:		unused node list lock
  *			v4daddr:	unchangeable
  *			ip_id_count:	idlock
  */

 /* Exported for inet_getid inline function. */

--- 16 unchanged lines hidden ---
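[Note] The one-line change to the locking table above is the heart of this patch: the pair of hand-maintained pointers (unused_next, unused_prevp) in struct inet_peer becomes a single embedded struct list_head named unused. The companion change lives in include/net/inetpeer.h and is not part of this diff; presumably it looks roughly like this sketch:

-	struct inet_peer	*unused_next, **unused_prevp;
+	struct list_head	unused;

Every hunk below is mechanical fallout from that substitution: open-coded pointer surgery on the unused list is replaced by the generic helpers from <linux/list.h>.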

 /* Exported for sysctl_net_ipv4. */
 int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
 int inet_peer_minttl __read_mostly = 120 * HZ;		/* TTL under high load: 120 sec */
 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

-static struct inet_peer *inet_peer_unused_head;
-static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static LIST_HEAD(unused_peers);
 static DEFINE_SPINLOCK(inet_peer_unused_lock);

 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);


 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)

--- 26 unchanged lines hidden ---
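[Note] LIST_HEAD(unused_peers) can replace both the head pointer and the tail pointer because the kernel list is circular and doubly linked: an empty list is a node whose next and prev point back at itself, and the tail is always reachable as unused_peers.prev. The relevant <linux/list.h> definitions (standard in 2.6-era kernels, reproduced here for orientation) are:

	struct list_head {
		struct list_head *next, *prev;
	};

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	#define LIST_HEAD(name) \
		struct list_head name = LIST_HEAD_INIT(name)

	static inline int list_empty(const struct list_head *head)
	{
		return head->next == head;
	}

This is why the old inet_peer_unused_tailp double-pointer bookkeeping can simply disappear.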

 			+ inet_peer_gc_maxtime;
 	add_timer(&peer_periodic_timer);
 }

 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
-	if (p->unused_prevp != NULL) {
-		/* On unused list. */
-		*p->unused_prevp = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark it as removed */
-	}
+	list_del_init(&p->unused);
 	spin_unlock_bh(&inet_peer_unused_lock);
 }

 /*
  * Called with local BH disabled and the pool lock held.
  * _stack is known to be NULL or not at compile time,
  * so compiler will optimize the if (_stack) tests.
  */

--- 174 unchanged lines hidden ---
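[Note] The hunk above is the biggest win: nine lines of open-coded unlinking collapse into one list_del_init(). The subtlety is the off-list sentinel. The old code marked a node as "not on the unused list" by setting unused_prevp to NULL and had to test for it before unlinking; list_del_init() instead leaves the removed node pointing at itself, so unlinking an off-list node is a harmless no-op and no test is needed. A minimal userspace sketch of the idea (a hypothetical stand-alone reimplementation, not the kernel header itself):

	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	static void init_node(struct list_head *l) { l->next = l->prev = l; }

	/* Unlink and re-initialize, mirroring the kernel's list_del_init(). */
	static void del_init(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
		init_node(e);
	}

	int main(void)
	{
		struct list_head head, node;
		init_node(&head);
		node.prev = &head;	/* splice node onto the list */
		node.next = &head;
		head.next = head.prev = &node;
		del_init(&node);	/* unlink it... */
		del_init(&node);	/* ...again: node points at itself, so a no-op */
		printf("list empty: %d\n", head.next == &head);	/* prints 1 */
		return 0;
	}

Plain list_del() would not work here: in 2.6-era kernels it poisons the removed entry's pointers, so a second removal, or a later list_empty() membership test on the node, would be a bug.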

  * doesn't really exist because the entry will have a
  * recent deletion time and will not be cleaned again soon. */
 	inet_putpeer(p);
 }

 /* May be called with local BH enabled. */
 static int cleanup_once(unsigned long ttl)
 {
-	struct inet_peer *p;
+	struct inet_peer *p = NULL;
341
 	/* Remove the first entry from the list of unused nodes. */
 	spin_lock_bh(&inet_peer_unused_lock);
-	p = inet_peer_unused_head;
-	if (p != NULL) {
-		__u32 delta = (__u32)jiffies - p->dtime;
+	if (!list_empty(&unused_peers)) {
+		__u32 delta;
+
+		p = list_first_entry(&unused_peers, struct inet_peer, unused);
+		delta = (__u32)jiffies - p->dtime;
+
 		if (delta < ttl) {
 			/* Do not prune fresh entries. */
 			spin_unlock_bh(&inet_peer_unused_lock);
 			return -1;
 		}
-		inet_peer_unused_head = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark as not on the list */
+
+		list_del_init(&p->unused);
+
 		/* Grab an extra reference to prevent node disappearing
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
 	}
 	spin_unlock_bh(&inet_peer_unused_lock);

 	if (p == NULL)
 		/* It means that the total number of USED entries has

--- 41 unchanged lines hidden ---
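[Note] Two details in the cleanup_once() hunks above deserve a note. First, p is now initialized to NULL because it is only assigned inside the !list_empty() branch, and the "if (p == NULL)" test at the end of the hunk relies on that. Second, list_first_entry() recovers the containing inet_peer from the embedded list node; in <linux/list.h> it is just container_of() applied to head->next:

	#define list_entry(ptr, type, member) \
		container_of(ptr, type, member)

	#define list_first_entry(ptr, type, member) \
		list_entry((ptr)->next, type, member)

So list_first_entry(&unused_peers, struct inet_peer, unused) yields the oldest unused peer, exactly the node the removed inet_peer_unused_head pointer used to reference.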

 	write_lock_bh(&peer_pool_lock);
 	/* Check if an entry has suddenly appeared. */
 	p = lookup(daddr, stack);
 	if (p != peer_avl_empty)
 		goto out_free;

 	/* Link the node. */
 	link_to_pool(n);
-	n->unused_prevp = NULL;	/* not on the list */
+	INIT_LIST_HEAD(&n->unused);
 	peer_total++;
 	write_unlock_bh(&peer_pool_lock);

 	if (peer_total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
 		cleanup_once(0);

 	return n;

--- 38 unchanged lines hidden ---
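[Note] In the hunk above, a freshly allocated node used to be marked off-list with n->unused_prevp = NULL; it is now marked by making its list_head point at itself via INIT_LIST_HEAD(), whose 2.6-era definition is:

	static inline void INIT_LIST_HEAD(struct list_head *list)
	{
		list->next = list;
		list->prev = list;
	}

Skipping this initialization would leave n->unused full of garbage and crash the unconditional list_del_init() in unlink_from_unused(); a self-pointing node, by contrast, makes that call a safe no-op.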

 			peer_total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }

 void inet_putpeer(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
 	if (atomic_dec_and_test(&p->refcnt)) {
-		p->unused_prevp = inet_peer_unused_tailp;
-		p->unused_next = NULL;
-		*inet_peer_unused_tailp = p;
-		inet_peer_unused_tailp = &p->unused_next;
+		list_add_tail(&p->unused, &unused_peers);
 		p->dtime = (__u32)jiffies;
 	}
 	spin_unlock_bh(&inet_peer_unused_lock);
 }