/*	6LoWPAN fragment reassembly
 *
 *
 *	Authors:
 *	Alexander Aring		<aar@pengutronix.de>
 *
 *	Based on: net/ipv6/reassembly.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6_frag.h>
#include <net/inet_frag.h>

#include "6lowpan_i.h"

static const char lowpan_frags_cache_name[] = "lowpan-frags";

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *ldev);

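/* Constructor run by inet_frag_alloc() for every new queue: copy the
 * lookup key (tag, datagram size, source and destination address) into
 * the queue so the rhashtable can match it later.
 */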
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct frag_lowpan_compare_key *key = a;

	BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
	memcpy(&q->key, key, sizeof(*key));
}

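/* Expiry timer of a queue: a datagram whose fragments did not all show
 * up within the configured timeout is killed here; the final
 * inet_frag_put() drops the reference held for the timer.
 */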
static void lowpan_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q);
}

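/* Find the reassembly queue a fragment belongs to, creating one on
 * demand.  Per RFC 4944 fragments of the same datagram share the tuple
 * {source, destination, datagram_size, datagram_tag}, which is exactly
 * what frag_lowpan_compare_key carries.  The returned queue is
 * reference-counted; callers drop it with inet_frag_put().
 */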
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	struct frag_lowpan_compare_key key = {};
	struct inet_frag_queue *q;

	key.tag = cb->d_tag;
	key.d_size = cb->d_size;
	key.src = *src;
	key.dst = *dst;

	q = inet_frag_find(&ieee802154_lowpan->frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct lowpan_frag_queue, q);
}

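/* Queue one fragment, keeping the chain sorted by datagram offset.
 * Called with fq->q.lock held.  Returns 1 once the final piece arrives
 * and lowpan_frag_reasm() succeeds, -1 otherwise; a corrupt fragment
 * is freed here.
 */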
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *ldev;
	int end, offset;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

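	/* d_offset arrives in units of eight octets (RFC 4944), while
	 * d_size is the length of the full, uncompressed IPv6 datagram.
	 */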
	offset = lowpan_802154_cb(skb)->d_offset << 3;
	end = lowpan_802154_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev ||
	    lowpan_802154_cb(prev)->d_offset <
	    lowpan_802154_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_802154_cb(next)->d_offset >=
		    lowpan_802154_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	ldev = skb->dev;
	if (ldev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1)
		fq->q.flags |= INET_FRAG_FIRST_IN;

	fq->q.meat += skb->len;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

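		/* Park the dst reference while reassembly runs; it can
		 * rewrite this skb's metadata via skb_morph().
		 */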
		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, ldev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}

/*	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 on successful reassembly, -1 on failure.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *ldev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(fq->q.net, sum_truesize);

	skb_mark_not_on_list(head);
	head->dev = ldev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}

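/* Map the lowpan_rx_result of the fragment handlers onto the netif
 * verdicts: only RX_QUEUED is a success, everything else drops the
 * frame.
 */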
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
					  lowpan_rx_result res)
{
	switch (res) {
	case RX_QUEUED:
		return NET_RX_SUCCESS;
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		/* fall-through */
	default:
		/* everything else is a failure */
		return NET_RX_DROP;
	}
}

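/* A FRAG1 fragment starts with the (possibly compressed) IPv6 header.
 * If it is IPHC encoded, decompress it in place so the queued bytes
 * match the uncompressed datagram that d_size describes.
 */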
static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
{
	int ret;

	if (!lowpan_is_iphc(*skb_network_header(skb)))
		return RX_CONTINUE;

	ret = lowpan_iphc_decompress(skb);
	if (ret < 0)
		return RX_DROP;

	return RX_QUEUED;
}

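/* Run the dispatch handlers on a FRAG1 payload; the first handler that
 * does not return RX_CONTINUE decides the verdict.
 */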
static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
{
	lowpan_rx_result res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(skb);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* most likely dispatch first */
	CALL_RXH(lowpan_frag_rx_h_iphc);
	CALL_RXH(lowpan_rx_h_ipv6);

rxh_next:
	return lowpan_frag_rx_handlers_result(skb, res);
#undef CALL_RXH
}

#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8

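/* Parse the fragmentation header (RFC 4944, section 5.3) into the skb's
 * control block and pull it off the skb:
 *
 *                      1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |1 1 0 0 0|    datagram_size    |         datagram_tag          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * FRAGN (dispatch 11100) carries an additional datagram_offset octet
 * after the tag; FRAG1 (dispatch 11000) has an implicit offset of zero.
 */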
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
			 struct lowpan_802154_cb *cb)
{
	bool fail;
	u8 high = 0, low = 0;
	__be16 d_tag = 0;

	fail = lowpan_fetch_skb(skb, &high, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	/* the dispatch value occupies the top five bits of the first
	 * octet; its low three bits are the high part of the 11-bit
	 * datagram size
	 */
	cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
		LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
	cb->d_tag = ntohs(d_tag);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		cb->d_offset = 0;
		/* datagram_size must cover at least an ipv6hdr on FRAG1 */
		fail |= cb->d_size < sizeof(struct ipv6hdr);
		/* check if we can dereference the dispatch value */
		fail |= !skb->len;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}

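/* Receive one FRAG1/FRAGN frame from the 6LoWPAN rx path.  Returns 1
 * when this fragment completed the datagram, in which case @skb has
 * been turned into the reassembled packet; -1 otherwise, in which case
 * the skb was either queued for reassembly or freed.
 */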
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
	struct ieee802154_hdr hdr = {};
	int err;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto err;

	err = lowpan_get_cb(skb, frag_type, cb);
	if (err < 0)
		goto err;

	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		err = lowpan_invoke_frag_rx_handlers(skb);
		if (err == NET_RX_DROP)
			goto err;
	}

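	/* A 6LoWPAN datagram never exceeds the IPv6 minimum MTU of
	 * 1280 octets; anything larger is bogus.
	 */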
	if (cb->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}

#ifdef CONFIG_SYSCTL

static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

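/* Register the per-namespace sysctls.  For every namespace but init_net
 * the template table is duplicated and its .data/.extra pointers are
 * redirected to that namespace's own thresholds, so that
 * proc_doulongvec_minmax() keeps enforcing low_thresh <= high_thresh
 * independently in each namespace.
 */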
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	int res;

	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
	ieee802154_lowpan->frags.f = &lowpan_frags;

	res = inet_frags_init_net(&ieee802154_lowpan->frags);
	if (res < 0)
		return res;
	res = lowpan_frags_ns_sysctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&ieee802154_lowpan->frags);
	return res;
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&ieee802154_lowpan->frags);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};

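/* rhashtable glue: key and object hashing must digest the same bytes,
 * so both run jhash2() over the full frag_lowpan_compare_key.  The key
 * is zero-initialized in fq_find() and copied wholesale by
 * lowpan_frag_init(), so padding inside it cannot skew the hash or the
 * memcmp() in lowpan_obj_cmpfn().
 */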
static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}

static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key,
		      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}

static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_lowpan_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params lowpan_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= lowpan_key_hashfn,
	.obj_hashfn		= lowpan_obj_hashfn,
	.obj_cmpfn		= lowpan_obj_cmpfn,
	.automatic_shrinking	= true,
};

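/* Subsystem init: wire up the inet_frags callbacks first, then register
 * the global sysctls and the pernet operations; unwind in reverse order
 * on failure.
 */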
int __init lowpan_net_frag_init(void)
{
	int ret;

	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	lowpan_frags.rhash_params = lowpan_rhash_params;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto out;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;
out:
	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
err_sysctl:
	inet_frags_fini(&lowpan_frags);
	return ret;
}

void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}