/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 * Theory of operations.
 * We keep one entry for each peer IP address.  Each node contains long-lived
 * information about the peer which does not depend on routes.
 *
 * Nodes are removed only when their reference counter goes to 0.
 * Once that happens, the node may be removed once a sufficient amount of
 * time has passed since its last use.  The less-recently-used entries can
 * also be removed if the pool is overloaded, i.e. if the total number of
 * entries is greater than or equal to the threshold.
 *
 * The node pool is organised as an RB tree.
 * Such an implementation has been chosen not just for fun.  It is a way to
 * prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 * number of long-living nodes in a single hash slot would significantly delay
 * lookups performed with disabled BHs.
 *
 * Serialisation issues.
 * 1.  Nodes may appear in the tree only with the pool lock held.
 * 2.  Nodes may disappear from the tree only with the pool lock held
 *     AND the reference count being 0.
 * 3.  Global variable peer_total is modified under the pool lock.
 * 4.  struct inet_peer fields modification:
 *	rb_node: pool lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent node disappearing
 *	daddr: unchangeable
 *
 * An illustrative lookup/release sketch follows the PEER_MAX_GC
 * definition below.
 */

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32
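/*
 * Illustrative usage sketch (added for exposition; not part of the original
 * file).  It shows the lookup/release pattern implied by the refcounting
 * rules above.  example_touch_peer() is a hypothetical helper;
 * inet_getpeer(), inet_putpeer() and inetpeer_set_addr_v4() are the real
 * API (the latter lives in include/net/inetpeer.h).
 *
 *	static void example_touch_peer(struct inet_peer_base *base, __be32 ip)
 *	{
 *		struct inetpeer_addr daddr;
 *		struct inet_peer *peer;
 *
 *		inetpeer_set_addr_v4(&daddr, ip);
 *
 *		peer = inet_getpeer(base, &daddr, 1);	(may create; returns
 *							 with a reference held)
 *		if (!peer)
 *			return;
 *
 *		... read or update peer->rate_tokens, peer->metrics, ...
 *
 *		inet_putpeer(peer);	(drops the reference and records
 *					 dtime for the GC TTL check)
 *	}
 */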
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);
}

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			/* refcnt already hit zero: the node is on its way
			 * out, treat it as not found.
			 */
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* Perform garbage collection on all items stacked during a lookup.
 * A worked example of the TTL computation follows this function.
 */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		/* The READ_ONCE() pairs with the WRITE_ONCE()
		 * in inet_putpeer()
		 */
		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}
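/*
 * Worked example for the TTL interpolation in inet_peer_gc() (added for
 * exposition; the numbers assume the default sysctls declared above:
 * inet_peer_minttl = 120 * HZ, inet_peer_maxttl = 600 * HZ).  The effective
 * TTL falls linearly from peer_maxttl for an empty pool towards peer_minttl
 * as base->total approaches peer_threshold.  With peer_threshold = 4096 and
 * base->total = 2048 (pool half full), evaluating left to right:
 *
 *	ttl = 600*HZ - (600*HZ - 120*HZ) / HZ * 2048 / 4096 * HZ
 *	    = 600*HZ - 480 * 2048 / 4096 * HZ
 *	    = 600*HZ - 240*HZ
 *	    = 360*HZ				(six minutes)
 *
 * Dividing by HZ before multiplying by base->total keeps the intermediate
 * product (480 * 2048 here) comfortably within 32 bits.
 */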
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, this time taking the lock first.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			/* Two references: one for the tree, one for the
			 * caller.
			 */
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
	/* The WRITE_ONCE() pairs with itself (we run lockless)
	 * and the READ_ONCE() in inet_peer_gc()
	 */
	WRITE_ONCE(p->dtime, (__u32)jiffies);

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 * Check transmit rate limitation for a given message.
 * The rate information is held in the inet_peer entries now.
 * This function is generic and could be used for other purposes
 * too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 * Note that the same inet_peer fields are modified by functions in
 * route.c too, but these work for packet destinations while xrlim_allow
 * works for icmp destinations.  This means the rate limiting information
 * for one "ip object" is shared - and these ICMPs are twice limited:
 * by source and by destination.
 *
 * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			SHOULD allow setting of rate limits
 *
 * Shared between ICMPv4 and ICMPv6.
 * A worked example follows at the end of this file.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);
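/*
 * Worked example for inet_peer_xrlim_allow() (added for exposition).  With
 * timeout = HZ, i.e. a one-message-per-second limit:
 *
 *	- tokens accrue at one per elapsed jiffy since rate_last and are
 *	  capped at XRLIM_BURST_FACTOR * timeout = 6*HZ;
 *	- each allowed transmission spends `timeout' tokens, so a peer that
 *	  has been idle for six seconds or more may burst six messages and
 *	  thereafter sustain one per second.
 *
 * Hypothetical call site (send_icmp_error() is an invented name, modelled
 * on how the ICMP code consults this helper):
 *
 *	if (inet_peer_xrlim_allow(peer, HZ))
 *		send_icmp_error();
 */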