// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 */

#define pr_fmt(fmt) "IPv6-nf: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>

#include <net/ipv6_frag.h>

#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netns/generic.h>

static const char nf_frags_cache_name[] = "nf-frags";

static unsigned int nf_frag_pernet_id __read_mostly;
static struct inet_frags nf_frags;

static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
{
	return net_generic(net, nf_frag_pernet_id);
}

#ifdef CONFIG_SYSCTL

static struct ctl_table nf_ct_frag6_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_frag6_timeout",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_frag6_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "nf_conntrack_frag6_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{ }
};

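/* Register the per-netns nf_conntrack_frag6_* sysctls.  For namespaces
 * other than init_net the template table is duplicated so that each
 * namespace gets its own writable copy, wired up to its fqdir limits.
 */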
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag;
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = nf_ct_frag6_sysctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;
	}

	nf_frag = nf_frag_pernet(net);

	table[0].data	= &nf_frag->fqdir->timeout;
	table[1].data	= &nf_frag->fqdir->low_thresh;
	table[1].extra2	= &nf_frag->fqdir->high_thresh;
	table[2].data	= &nf_frag->fqdir->high_thresh;
	table[2].extra1	= &nf_frag->fqdir->low_thresh;
	table[2].extra2	= &nf_frag->fqdir->high_thresh;

	hdr = register_net_sysctl(net, "net/netfilter", table);
	if (hdr == NULL)
		goto err_reg;

	nf_frag->nf_frag_frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

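/* Unregister the per-netns sysctls and free the duplicated table, if any. */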
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	struct ctl_table *table;

	table = nf_frag->nf_frag_frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nf_frag->nf_frag_frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

#else
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	return 0;
}
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
}
#endif

static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev);

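/* Convert the packet's ECN field into the bitmask form that is
 * accumulated in fq->ecn while fragments are collected.
 */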
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

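/* Timer callback: the queue timed out before reassembly completed, so
 * expire it via ip6frag_expire_frag_queue().
 */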
static void nf_ct_frag6_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}

/* Creation primitives: look up (or create) the reassembly queue matching
 * this fragment's {id, saddr, daddr, user, iif} key.
 */
static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
				  const struct ipv6hdr *hdr, int iif)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = user,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(nf_frag->fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

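/* Queue one fragment on @fq.  Returns -EINPROGRESS once the skb has been
 * absorbed into the queue (duplicates are freed and treated as queued),
 * 0 when this fragment completed reassembly and @skb now carries the
 * reassembled datagram, or a negative error if the fragment is rejected.
 */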
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
			     const struct frag_hdr *fhdr, int nhoff)
{
	unsigned int payload_len;
	struct net_device *dev;
	struct sk_buff *prev;
	int offset, end, err;
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE) {
		pr_debug("Already completed\n");
		goto err;
	}

	payload_len = ntohs(ipv6_hdr(skb)->payload_len);

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (payload_len -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -EINVAL;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			inet_frag_kill(&fq->q);
			return -EPROTO;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Make sure the compiler won't do silly aliasing games. */
	barrier();

	prev = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err) {
		if (err == IPFRAG_DUP) {
			/* No error for duplicates, pretend they got queued. */
			kfree_skb(skb);
			return -EINPROGRESS;
		}
		goto insert_error;
	}

	if (dev)
		fq->iif = dev->ifindex;

	fq->q.stamp = skb->tstamp;
	fq->q.mono_delivery_time = skb->mono_delivery_time;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	if (payload_len > fq->q.max_size)
		fq->q.max_size = payload_len;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = nf_ct_frag6_reasm(fq, skb, prev, dev);
		skb->_skb_refdst = orefdst;

		/* After queue has assumed skb ownership, only 0 or
		 * -EINPROGRESS must be returned.
		 */
		return err ? -EINPROGRESS : 0;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

insert_error:
	inet_frag_kill(&fq->q);
err:
	skb_dst_drop(skb);
	return -EINVAL;
}

/*
 *	Check if this packet is complete.
 *
 *	Called with fq->q.lock held.  The caller must have checked that the
 *	queue is eligible for reassembly, i.e. it is not COMPLETE, both the
 *	first and the last fragments have arrived, and all the data is here.
 */
static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev)
{
	void *reasm_data;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto err;

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto err;

	payload_len = ((skb->data - skb_network_header(skb)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN) {
		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
				    payload_len);
		goto err;
	}

	/* We have to remove the fragment header from the datagram and relocate
	 * the remaining headers in order to calculate the ICV correctly. */
	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);

	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

	skb->ignore_df = 1;
	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_partial(skb_network_header(skb),
					 skb_network_header_len(skb),
					 skb->csum);

	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;

	return 0;

err:
	inet_frag_kill(&fq->q);
	return -EINVAL;
}

/*
 * Find the header just before the Fragment Header.
 *
 * On success, returns 0 and sets:
 * (*prevhdrp): the value of the "Next Header" field in the header
 *		just before the Fragment Header.
 * (*prevhoff): the offset of the "Next Header" field in the header
 *		just before the Fragment Header.
 * (*fhoff)   : the offset of the Fragment Header.
 *
 * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
 *
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr)) {
			return -1;
		}
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = ipv6_authlen(&hdr);
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

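/* Entry point: gather the fragment carried by @skb into its reassembly
 * queue.  Returns -EINPROGRESS when the skb has been queued to wait for
 * more fragments, 0 when the caller should let the packet continue (it is
 * not handled as a fragment, or it now carries the reassembled datagram),
 * or a negative error.
 */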
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
	u16 savethdr = skb->transport_header;
	u8 nexthdr = NEXTHDR_FRAGMENT;
	int fhoff, nhoff, ret;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;
	u8 prevhdr;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return 0;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return 0;

	/* Discard the first fragment if it does not include all headers
	 * RFC 8200, Section 4.5
	 */
	if (ipv6frag_thdr_truncated(skb, fhoff, &nexthdr)) {
		pr_debug("Drop incomplete fragment\n");
		return 0;
	}

	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
		return -ENOMEM;

	skb_set_transport_header(skb, fhoff);
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	skb_orphan(skb);
	fq = fq_find(net, fhdr->identification, user, hdr,
		     skb->dev ? skb->dev->ifindex : 0);
	if (fq == NULL) {
		pr_debug("Can't find and can't create new queue\n");
		return -ENOMEM;
	}

	spin_lock_bh(&fq->q.lock);

	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
	if (ret == -EPROTO) {
		skb->transport_header = savethdr;
		ret = 0;
	}

	spin_unlock_bh(&fq->q.lock);
	inet_frag_put(&fq->q);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);

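/* Per-netns init: allocate the fqdir, apply the default thresholds and
 * timeout, and register the sysctls.
 */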
static int nf_ct_net_init(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	int res;

	res = fqdir_init(&nf_frag->fqdir, &nf_frags, net);
	if (res < 0)
		return res;

	nf_frag->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	nf_frag->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	nf_frag->fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = nf_ct_frag6_sysctl_register(net);
	if (res < 0)
		fqdir_exit(nf_frag->fqdir);
	return res;
}

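/* First stage of per-netns teardown: stop new reassembly queues from
 * being created while the namespace is dismantled.
 */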
static void nf_ct_net_pre_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	fqdir_pre_exit(nf_frag->fqdir);
}

static void nf_ct_net_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	nf_ct_frags6_sysctl_unregister(net);
	fqdir_exit(nf_frag->fqdir);
}

static struct pernet_operations nf_ct_net_ops = {
	.init		= nf_ct_net_init,
	.pre_exit	= nf_ct_net_pre_exit,
	.exit		= nf_ct_net_exit,
	.id		= &nf_frag_pernet_id,
	.size		= sizeof(struct nft_ct_frag6_pernet),
};

static const struct rhashtable_params nfct_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};

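/* Module init: fill in the inet_frags descriptor for nf-frags and register
 * the per-netns operations.
 */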
int nf_ct_frag6_init(void)
{
	int ret = 0;

	nf_frags.constructor = ip6frag_init;
	nf_frags.destructor = NULL;
	nf_frags.qsize = sizeof(struct frag_queue);
	nf_frags.frag_expire = nf_ct_frag6_expire;
	nf_frags.frags_cache_name = nf_frags_cache_name;
	nf_frags.rhash_params = nfct_rhash_params;
	ret = inet_frags_init(&nf_frags);
	if (ret)
		goto out;
	ret = register_pernet_subsys(&nf_ct_net_ops);
	if (ret)
		inet_frags_fini(&nf_frags);

out:
	return ret;
}

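/* Module exit: undo nf_ct_frag6_init(). */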
void nf_ct_frag6_cleanup(void)
{
	unregister_pernet_subsys(&nf_ct_net_ops);
	inet_frags_fini(&nf_frags);
}