/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR of the ECN values of all fragments, apply the RFC 3168
 * section 5.3 requirements.
 * Value: 0xff if the frame should be dropped,
 *        otherwise 0 or INET_ECN_CE, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations: drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
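
/* Usage sketch (illustrative, not part of this file; it mirrors what
 * ipv4's ip_fragment.c does): a reassembler ORs each fragment's
 * IPFRAG_ECN_* value into its queue as fragments arrive, then consults
 * this table once at reassembly time:
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;	// invalid ECN combination: drop the datagram
 *	iph->tos |= ecn;	// propagate CE into the reassembled header
 */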

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to the new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on the hb_dest lock
				 * below.  If it is already locked, it will
				 * be released soon, since the other caller
				 * cannot be waiting for the hb lock that we
				 * took above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}
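
/* Illustrative sketch (not part of this file): taking a second spinlock of
 * the same lock class, as the rebuild above does, needs an explicit nesting
 * annotation or lockdep reports a false-positive deadlock.  The generic
 * shape of the pattern is:
 *
 *	spin_lock(&src->lock);
 *	spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *	// ... move the entry from src to dst ...
 *	spin_unlock(&dst->lock);
 *	spin_unlock(&src->lock);
 */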

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	if (!hlist_unhashed(&q->list_evictor))
		return false;

	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire(&fq->timer);

	return evicted;
}

static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = READ_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);
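
/* Registration sketch (modeled on ipv4's ip_fragment.c; the callback names
 * are assumptions for illustration, not defined here): a protocol fills in
 * its struct inet_frags hooks once and then calls inet_frags_init():
 *
 *	static struct inet_frags ip4_frags;
 *
 *	static int __init ipfrag_init(void)
 *	{
 *		ip4_frags.hashfn	   = ip4_hashfn;
 *		ip4_frags.constructor	   = ip4_frag_init;
 *		ip4_frags.destructor	   = ip4_frag_free;
 *		ip4_frags.qsize		   = sizeof(struct ipq);
 *		ip4_frags.match		   = ip4_frag_match;
 *		ip4_frags.frag_expire	   = ip_expire;
 *		ip4_frags.frags_cache_name = ip_frag_cache_name;
 *		return inet_frags_init(&ip4_frags);
 *	}
 */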

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf)
{
	struct inet_frags *f = nf->f;
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    sum_frag_mem_limit(nf))
		goto evict_again;
}
EXPORT_SYMBOL(inet_frags_exit_net);
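
/* Per-netns teardown sketch (an assumption mirroring ipv4's pernet exit
 * hook): each namespace passes its own netns_frags here on exit:
 *
 *	static void __net_exit ipv4_frags_exit_net(struct net *net)
 *	{
 *		inet_frags_exit_net(&net->ipv4.frags);
 *	}
 */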

static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, fq->net->f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq);
		refcount_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	f = nf->f;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);
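
/* The matching release helper lives in include/net/inet_frag.h; a sketch
 * of its shape (paraphrased, shown here for context): callers drop their
 * reference with inet_frag_put(), and the last put lands in
 * inet_frag_destroy() above:
 *
 *	static inline void inet_frag_put(struct inet_frag_queue *q)
 *	{
 *		if (refcount_dec_and_test(&q->refcnt))
 *			inet_frag_destroy(q);
 *	}
 */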

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash table, because such an
	 * entry could have been created on another CPU before we
	 * acquired the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			refcount_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		refcount_inc(&qp->refcnt);

	refcount_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			refcount_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);
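
/* Caller sketch (modeled on ipv4's ip_find(); the hash helper and struct
 * names are illustrative assumptions): the protocol computes its own hash,
 * calls inet_frag_find(), and must handle the ERR_PTR(-ENOBUFS) overflow
 * case before touching the queue:
 *
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct ipq, q);
 */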

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);