xref: /openbmc/linux/net/ipv4/inetpeer.c (revision 060f35a317ef09101b128f399dce7ed13d019461)
/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-lived
 *  information about the peer which does not depend on routes.
 *
 *  Nodes are removed only when their reference counter drops to 0.
 *  Once that has happened, a node may be removed after a sufficient amount
 *  of time has passed since its last use.  A less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It is a way to
 *  prevent easy and efficient DoS attacks based on creating hash collisions.
 *  A huge number of long-living nodes in a single hash slot would
 *  significantly delay lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND their reference count being 0.
 *  3.  The per-base total counter (base->total) is modified under the
 *      pool lock.
 *  4.  struct inet_peer field modifications:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent the node from
 *		   disappearing
 *		daddr: unchangeable
 */
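
/*
 *  Minimal usage sketch (illustrative only, not code from this file): a
 *  caller initialises a base once, then looks peers up under RCU.  The HZ
 *  timeout and the send_probe() helper below are made-up placeholders, not
 *  names taken from this file.
 *
 *	struct inet_peer_base base;
 *	struct inet_peer *peer;
 *
 *	inet_peer_base_init(&base);
 *	...
 *	rcu_read_lock();
 *	peer = inet_getpeer(&base, &daddr);
 *	if (peer && inet_peer_xrlim_allow(peer, HZ))
 *		send_probe();
 *	rcu_read_unlock();
 *
 *  inet_getpeer() takes no reference, so the pointer is only valid inside
 *  the RCU read-side section unless the caller takes a refcount itself.
 */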

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
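
	/* Worked example (numbers are illustrative): with 4 GiB of RAM and a
	 * cache-aligned struct inet_peer of, say, 192 bytes, nr_entries is
	 * (4 << 30) / (100 * 192) ~= 223000, so the clamp above caps the
	 * threshold at 65536 + 128; very small machines are raised to 4096.
	 */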

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);
}

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;
	u32 now;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			now = jiffies;
			if (READ_ONCE(p->dtime) != now)
				WRITE_ONCE(p->dtime, now);
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* perform garbage collection on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
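	/* The else branch above interpolates linearly (in integer arithmetic)
	 * between peer_maxttl for an empty pool and peer_minttl for a full
	 * one.  With the defaults (maxttl 10 min, minttl 120 sec), a pool at
	 * half the threshold gets roughly 600*HZ - (480/2)*HZ = 360*HZ, i.e.
	 * about six minutes.
	 */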
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}

/* Must be called under RCU: no refcount change is done here. */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);

	if (p)
		return p;

	/* Retry an exact lookup, this time taking the lock.
	 * At least the nodes should now be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 1);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
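
/* Because inet_getpeer() takes no reference, the pointer it returns is only
 * guaranteed to stay valid while the caller remains inside its RCU read-side
 * critical section; freeing is always deferred through call_rcu().  A caller
 * that needs the peer beyond that section must take its own reference (e.g.
 * with refcount_inc_not_zero()) and drop it later with inet_putpeer().
 */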

void inet_putpeer(struct inet_peer *p)
{
	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}

/*
 *	Check the transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations.  This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are limited twice:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
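/* Worked example: with timeout == HZ (one message per second), tokens
 * accumulate at one per jiffy of elapsed time and are capped at
 * XRLIM_BURST_FACTOR * HZ, so a long-idle peer may burst up to six
 * messages back to back and is then limited to roughly one per second.
 */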
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);