inetpeer.c (851bdd11ca8b855ee946f50dac0850a4bec875c9) inetpeer.c (73f156a6e8c1074ac6327e0abd1169e95eb66463)
1/*
2 * INETPEER - A storage for permanent information about peers
3 *
4 * This source is covered by the GNU GPL, the same as all kernel sources.
5 *
6 * Authors: Andrey V. Savochkin <saw@msu.ru>
7 */
8

--- 12 unchanged lines hidden (view full) ---

21#include <net/ip.h>
22#include <net/inetpeer.h>
23#include <net/secure_seq.h>
24
25/*
26 * Theory of operations.
27 * We keep one entry for each peer IP address. The nodes contain long-lived
28 * information about the peer which doesn't depend on routes.
1/*
2 * INETPEER - A storage for permanent information about peers
3 *
4 * This source is covered by the GNU GPL, the same as all kernel sources.
5 *
6 * Authors: Andrey V. Savochkin <saw@msu.ru>
7 */
8

--- 12 unchanged lines hidden (view full) ---

21#include <net/ip.h>
22#include <net/inetpeer.h>
23#include <net/secure_seq.h>
24
25/*
26 * Theory of operations.
27 * We keep one entry for each peer IP address. The nodes contain long-lived
28 * information about the peer which doesn't depend on routes.
29 * At this moment this information consists only of ID field for the next
30 * outgoing IP packet. This field is incremented with each packet as encoded
31 * in inet_getid() function (include/net/inetpeer.h).
32 * At the moment of writing this notes identifier of IP packets is generated
33 * to be unpredictable using this code only for packets subjected
34 * (actually or potentially) to defragmentation. I.e. DF packets less than
35 * PMTU in size when local fragmentation is disabled use a constant ID and do
36 * not use this code (see ip_select_ident() in include/net/ip.h).
37 *
29 *
38 * Route cache entries hold references to our nodes.
39 * New cache entries get references via lookup by destination IP address in
40 * the avl tree. The reference is grabbed only when it's needed i.e. only
41 * when we try to output IP packet which needs an unpredictable ID (see
42 * __ip_select_ident() in net/ipv4/route.c).
43 * Nodes are removed only when reference counter goes to 0.
44 * When that happens, the node may be removed once a sufficient amount of
45 * time has passed since its last use. The less-recently-used entry can
46 * also be removed if the pool is overloaded i.e. if the total amount of
47 * entries is greater-or-equal than the threshold.
48 *
49 * Node pool is organised as an AVL tree.
50 * Such an implementation has been chosen not just for fun. It's a way to

--- 6 unchanged lines hidden (view full) ---

57 * 2. Nodes may disappear from the tree only with the pool lock held
58 * AND reference count being 0.
59 * 3. Global variable peer_total is modified under the pool lock.
60 * 4. struct inet_peer fields modification:
61 * avl_left, avl_right, avl_parent, avl_height: pool lock
62 * refcnt: atomically against modifications on other CPU;
63 * usually under some other lock to prevent node disappearing
64 * daddr: unchangeable
30 * Nodes are removed only when reference counter goes to 0.
31 * When that happens, the node may be removed once a sufficient amount of
32 * time has passed since its last use. The less-recently-used entry can
33 * also be removed if the pool is overloaded i.e. if the total amount of
34 * entries is greater-or-equal than the threshold.
35 *
36 * Node pool is organised as an AVL tree.
37 * Such an implementation has been chosen not just for fun. It's a way to

--- 6 unchanged lines hidden (view full) ---

44 * 2. Nodes may disappear from the tree only with the pool lock held
45 * AND reference count being 0.
46 * 3. Global variable peer_total is modified under the pool lock.
47 * 4. struct inet_peer fields modification:
48 * avl_left, avl_right, avl_parent, avl_height: pool lock
49 * refcnt: atomically against modifications on other CPU;
50 * usually under some other lock to prevent node disappearing
51 * daddr: unchangeable
65 * ip_id_count: atomic value (no lock needed)
66 */
67
/* Slab cache from which inet_peer nodes are allocated
 * (used via kmem_cache_alloc() in the lookup/create path below). */
68static struct kmem_cache *peer_cachep __read_mostly;
69
/* List head onto which peers are queued (peers link in via p->gc_list,
 * initialised with INIT_LIST_HEAD() at creation). */
70static LIST_HEAD(gc_list);
/* Fixed delay of 60 seconds, expressed in jiffies. */
71static const int gc_delay = 60 * HZ;
/* Deferred work item; NOTE(review): the worker body is outside this view —
 * presumably it drains gc_list and frees eligible peers; confirm. */
72static struct delayed_work gc_work;
/* Spinlock serialising access to gc_list. */
73static DEFINE_SPINLOCK(gc_lock);

--- 418 unchanged lines hidden (view full) ---

492 if (gccnt && create)
493 goto relookup;
494 }
495 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
496 if (p) {
497 p->daddr = *daddr;
498 atomic_set(&p->refcnt, 1);
499 atomic_set(&p->rid, 0);
52 */
53
/* Slab cache from which inet_peer nodes are allocated
 * (used via kmem_cache_alloc() in the lookup/create path below). */
54static struct kmem_cache *peer_cachep __read_mostly;
55
/* List head onto which peers are queued (peers link in via p->gc_list,
 * initialised with INIT_LIST_HEAD() at creation). */
56static LIST_HEAD(gc_list);
/* Fixed delay of 60 seconds, expressed in jiffies. */
57static const int gc_delay = 60 * HZ;
/* Deferred work item; NOTE(review): the worker body is outside this view —
 * presumably it drains gc_list and frees eligible peers; confirm. */
58static struct delayed_work gc_work;
/* Spinlock serialising access to gc_list. */
59static DEFINE_SPINLOCK(gc_lock);

--- 418 unchanged lines hidden (view full) ---

478 if (gccnt && create)
479 goto relookup;
480 }
481 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
482 if (p) {
483 p->daddr = *daddr;
484 atomic_set(&p->refcnt, 1);
485 atomic_set(&p->rid, 0);
500 atomic_set(&p->ip_id_count,
501 (daddr->family == AF_INET) ?
502 secure_ip_id(daddr->addr.a4) :
503 secure_ipv6_id(daddr->addr.a6));
504 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
505 p->rate_tokens = 0;
506 /* 60*HZ is arbitrary, but chosen enough high so that the first
507 * calculation of tokens is at its maximum.
508 */
509 p->rate_last = jiffies - 60*HZ;
510 INIT_LIST_HEAD(&p->gc_list);
511

--- 86 unchanged lines hidden ---
486 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
487 p->rate_tokens = 0;
488 /* 60*HZ is arbitrary, but chosen enough high so that the first
489 * calculation of tokens is at its maximum.
490 */
491 p->rate_last = jiffies - 60*HZ;
492 INIT_LIST_HEAD(&p->gc_list);
493

--- 86 unchanged lines hidden ---