inetpeer.c: c1864cfb80a64933c221e33fed9611356c031944 → c8a627ed06d6d49bf65015a2185c519335c4c83f
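
This diff adds network namespace support to the inetpeer cache: the global v4_peers/v6_peers AVL bases are replaced by per-namespace bases hung off struct net (net->ipv4.peers and net->ipv6.peers), allocated and freed through pernet_operations, and inet_getpeer(), inetpeer_invalidate_tree() and family_to_base() each gain a struct net * argument.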
 /*
  * INETPEER - A storage for permanent information about peers
  *
  * This source is covered by the GNU GPL, the same as all kernel sources.
  *
  * Authors:	Andrey V. Savochkin <saw@msu.ru>
  */

--- 74 unchanged lines hidden ---

 };

 struct inet_peer_base {
 	struct inet_peer __rcu	*root;
 	seqlock_t		lock;
 	int			total;
 };

-static struct inet_peer_base v4_peers = {
-	.root	= peer_avl_empty_rcu,
-	.lock	= __SEQLOCK_UNLOCKED(v4_peers.lock),
-	.total	= 0,
-};
-
-static struct inet_peer_base v6_peers = {
-	.root	= peer_avl_empty_rcu,
-	.lock	= __SEQLOCK_UNLOCKED(v6_peers.lock),
-	.total	= 0,
-};
-
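The two global bases above are deleted; their state moves into net->ipv4.peers and net->ipv6.peers, set up in inetpeer_net_init() below. One detail worth noting: __SEQLOCK_UNLOCKED() is a compile-time initializer and only works for static objects, so the kzalloc()ed per-net bases must initialize their seqlock at runtime instead. A minimal sketch of the two patterns (example_base and the snippet around it are illustrative, not part of this diff):

	/* Static base: the seqlock can be set up by a compile-time initializer. */
	static struct inet_peer_base example_base = {
		.root = peer_avl_empty_rcu,
		.lock = __SEQLOCK_UNLOCKED(example_base.lock),
	};

	/* Dynamic base: kzalloc() zeroes the memory, but the root must still be
	 * pointed at the empty-tree sentinel and the seqlock initialized by hand. */
	struct inet_peer_base *base = kzalloc(sizeof(*base), GFP_KERNEL);
	if (base) {
		base->root = peer_avl_empty_rcu;
		seqlock_init(&base->lock);
	}
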
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

 /* Exported for sysctl_net_ipv4. */
 int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
 int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

--- 37 unchanged lines hidden ---

 	spin_lock_bh(&gc_lock);
 	list_splice(&list, &gc_list);
 	spin_unlock_bh(&gc_lock);

 	schedule_delayed_work(&gc_work, gc_delay);
 }

+static int __net_init inetpeer_net_init(struct net *net)
+{
+	net->ipv4.peers = kzalloc(sizeof(struct inet_peer_base),
+				  GFP_KERNEL);
+	if (net->ipv4.peers == NULL)
+		return -ENOMEM;
+
+	net->ipv4.peers->root = peer_avl_empty_rcu;
+	seqlock_init(&net->ipv4.peers->lock);
+
+	net->ipv6.peers = kzalloc(sizeof(struct inet_peer_base),
+				  GFP_KERNEL);
+	if (net->ipv6.peers == NULL)
+		goto out_ipv6;
+
+	net->ipv6.peers->root = peer_avl_empty_rcu;
+	seqlock_init(&net->ipv6.peers->lock);
+
+	return 0;
+out_ipv6:
+	kfree(net->ipv4.peers);
+	return -ENOMEM;
+}
+
+static void __net_exit inetpeer_net_exit(struct net *net)
+{
+	inetpeer_invalidate_tree(net, AF_INET);
+	kfree(net->ipv4.peers);
+	net->ipv4.peers = NULL;
+
+	inetpeer_invalidate_tree(net, AF_INET6);
+	kfree(net->ipv6.peers);
+	net->ipv6.peers = NULL;
+}
+
+static struct pernet_operations inetpeer_ops = {
+	.init	= inetpeer_net_init,
+	.exit	= inetpeer_net_exit,
+};
+
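Usage note: once inetpeer_ops is registered (see register_pernet_subsys() in inet_initpeers() below), inetpeer_net_init() runs for every namespace that exists at registration time and for each one created afterwards, while inetpeer_net_exit() runs at namespace teardown, flushing both trees via inetpeer_invalidate_tree() before freeing the bases.
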
 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)
 {
 	struct sysinfo si;

 	/* Use the straight interface to information about memory. */
 	si_meminfo(&si);
 	/* The values below were suggested by Alexey Kuznetsov

--- 8 unchanged lines hidden ---

 	inet_peer_threshold >>= 2; /* about 128KB */

 	peer_cachep = kmem_cache_create("inet_peer_cache",
 			sizeof(struct inet_peer),
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 			NULL);

 	INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+	register_pernet_subsys(&inetpeer_ops);
 }

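Since registration happens on this __init path (reached from ip_init() during boot), the initial namespace has its peer bases in place before routing code can call inet_getpeer().
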
 static int addr_compare(const struct inetpeer_addr *a,
 			const struct inetpeer_addr *b)
 {
 	int i, n = (a->family == AF_INET ? 1 : 4);

 	for (i = 0; i < n; i++) {

--- 208 unchanged lines hidden ---

 		BUG_ON(delp[1] != &p->avl_left);
 		delp[1] = &t->avl_left;	/* was &p->avl_left */
 	}
 	peer_avl_rebalance(stack, stackptr, base);
 	base->total--;
 	call_rcu(&p->rcu, inetpeer_free_rcu);
 }

-static struct inet_peer_base *family_to_base(int family)
+static struct inet_peer_base *family_to_base(struct net *net,
+					     int family)
 {
-	return family == AF_INET ? &v4_peers : &v6_peers;
+	return family == AF_INET ? net->ipv4.peers : net->ipv6.peers;
 }

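family_to_base() now resolves the tree through the namespace rather than through file-scope globals. Presumably the static-inline convenience wrappers in include/net/inetpeer.h grow the same struct net * argument; a hedged sketch of what the IPv4 wrapper would look like after this change (the wrapper itself is not shown in this diff):

	static inline struct inet_peer *inet_getpeer_v4(struct net *net,
							__be32 v4daddr,
							int create)
	{
		struct inetpeer_addr daddr;

		daddr.addr.a4 = v4daddr;
		daddr.family = AF_INET;
		return inet_getpeer(net, &daddr, create);
	}
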
 /* perform garbage collect on all items stacked during a lookup */
 static int inet_peer_gc(struct inet_peer_base *base,
 			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
 			struct inet_peer __rcu ***stackptr)
 {
 	struct inet_peer *p, *gchead = NULL;

--- 23 unchanged lines hidden ---

 	while ((p = gchead) != NULL) {
 		gchead = p->gc_next;
 		cnt++;
 		unlink_from_pool(p, base, stack);
 	}
 	return cnt;
 }

-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
+struct inet_peer *inet_getpeer(struct net *net,
+			       const struct inetpeer_addr *daddr,
+			       int create)
 {
 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-	struct inet_peer_base *base = family_to_base(daddr->family);
+	struct inet_peer_base *base = family_to_base(net, daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
 	int invalidated, gccnt = 0;

 	/* Attempt a lockless lookup first.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock();

--- 108 unchanged lines hidden ---

 	spin_lock_bh(&gc_lock);
 	list_add_tail(&p->gc_list, &gc_list);
 	spin_unlock_bh(&gc_lock);

 	schedule_delayed_work(&gc_work, gc_delay);
 }

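Every inet_getpeer() caller must now say which namespace it means, so peers learned in one namespace are invisible to all others. A minimal, illustrative sketch of a lookup on the packet path (the helper name and skb context are assumptions, not taken from this diff):

	static struct inet_peer *lookup_peer_for_skb(struct net *net,
						     struct sk_buff *skb)
	{
		struct inetpeer_addr daddr;

		daddr.family = AF_INET;
		daddr.addr.a4 = ip_hdr(skb)->daddr;

		/* Callers typically derive the namespace from their context,
		 * e.g. dev_net(skb->dev) or sock_net(sk). */
		return inet_getpeer(net, &daddr, 1);
	}
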
-void inetpeer_invalidate_tree(int family)
+void inetpeer_invalidate_tree(struct net *net, int family)
 {
 	struct inet_peer *old, *new, *prev;
-	struct inet_peer_base *base = family_to_base(family);
+	struct inet_peer_base *base = family_to_base(net, family);

 	write_seqlock_bh(&base->lock);

 	old = base->root;
 	if (old == peer_avl_empty_rcu)
 		goto out;

 	new = peer_avl_empty_rcu;

 	prev = cmpxchg(&base->root, old, new);
 	if (prev == old) {
 		base->total = 0;
 		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
 	}

 out:
 	write_sequnlock_bh(&base->lock);
 }
 EXPORT_SYMBOL(inetpeer_invalidate_tree);
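
Usage note: the exported inetpeer_invalidate_tree() now flushes a single namespace's tree, which is exactly what inetpeer_net_exit() above relies on. The detach itself is one cmpxchg() of the root to the empty sentinel; the old tree is freed only after an RCU grace period via inetpeer_inval_rcu(), so concurrent lockless readers stay safe.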