/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain long-lived
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter drops to 0.
 *  When that happens, a node may be removed once a sufficient amount of
 *  time has passed since its last use.  The least-recently-used entries can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  This implementation was chosen over a hash table to prevent easy and
 *  efficient DoS attacks based on hash collisions: a huge number of
 *  long-lived nodes in a single hash slot would significantly delay lookups
 *  performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND their reference count being 0.
 *  3.  The per-base total counter is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *			usually under some other lock to prevent node disappearing
 *		daddr: unchangeable
 */
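/* Illustrative usage sketch (added by the editor, not part of the original
 * file): a typical caller takes a reference with inet_getpeer() and drops it
 * with inet_putpeer() once it is done with the entry.  The "base" and "daddr"
 * variables below are assumptions for the example only.
 *
 *	struct inet_peer *peer;
 *
 *	peer = inet_getpeer(base, &daddr, 1);	// may be NULL (no memory, or
 *						// create == 0 and no match)
 *	if (peer) {
 *		// read or update peer->metrics, peer->rate_tokens, etc.
 *		inet_putpeer(peer);		// release the lookup reference
 *	}
 */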
static struct kmem_cache *peer_cachep __read_mostly;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);
}
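/* Worked example (added for clarity, not in the original file): the shifts
 * above are cumulative, so starting from the default of 65536 + 128 = 65664
 * entries the effective threshold becomes about 32832 entries on systems
 * with <= 32MB of RAM, 16416 with <= 16MB, and 4104 with <= 8MB.
 */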
/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			/* Locked (write) path: remember traversed nodes so the
			 * caller can garbage collect the stale ones.
			 */
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			/* Lockless path: a writer changed the tree under us,
			 * give up and let the caller retry under the lock.
			 */
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* perform garbage collection on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	/* Scale the allowed idle time linearly from inet_peer_maxttl when the
	 * pool is nearly empty down to inet_peer_minttl as it approaches
	 * inet_peer_threshold; at or above the threshold, entries are
	 * reclaimed regardless of age.
	 */
	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		delta = (__u32)jiffies - p->dtime;
		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}
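/* Worked example (added for clarity): with the default inet_peer_minttl of
 * 120 * HZ and inet_peer_maxttl of 600 * HZ, a pool that is half full gives
 * ttl = 600*HZ - (480 * (threshold/2) / threshold) * HZ = 360 * HZ, i.e.
 * entries idle for less than about six minutes survive this GC pass.
 */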
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, this time taking the lock.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			/* one reference for the tree, one for the caller */
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
	p->dtime = (__u32)jiffies;

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations.  This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR		6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
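/* Worked example (added for clarity): with the default XRLIM_BURST_FACTOR of
 * 6 and a timeout of HZ (one second), a peer that has been idle long enough
 * holds a full bucket of 6*HZ tokens, so it may send up to six messages back
 * to back; after that, one message per second is allowed as tokens are
 * replenished at the rate of elapsed jiffies.
 */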
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct inet_peer *p, *n;

	rbtree_postorder_for_each_entry_safe(p, n, &base->rb_root, rb_node) {
		inet_putpeer(p);
		cond_resched();
	}

	base->rb_root = RB_ROOT;
	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);