// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI	<yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 */

#define pr_fmt(fmt) "IPv6-nf: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>

#include <net/ipv6_frag.h>

#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netns/generic.h>

static const char nf_frags_cache_name[] = "nf-frags";

static unsigned int nf_frag_pernet_id __read_mostly;
static struct inet_frags nf_frags;

static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
{
	return net_generic(net, nf_frag_pernet_id);
}

#ifdef CONFIG_SYSCTL

static struct ctl_table nf_ct_frag6_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_frag6_timeout",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_frag6_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "nf_conntrack_frag6_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{ }
};

static int nf_ct_frag6_sysctl_register(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag;
	struct ctl_table *table;
	struct ctl_table_header *hdr;

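	/* The initial netns uses the static table directly; every other
	 * netns gets a private copy so each namespace can tune its own
	 * defrag limits independently.
	 */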
	table = nf_ct_frag6_sysctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;
	}

	nf_frag = nf_frag_pernet(net);

	table[0].data	= &nf_frag->fqdir->timeout;
	table[1].data	= &nf_frag->fqdir->low_thresh;
	table[1].extra2	= &nf_frag->fqdir->high_thresh;
	table[2].data	= &nf_frag->fqdir->high_thresh;
	table[2].extra1	= &nf_frag->fqdir->low_thresh;

	hdr = register_net_sysctl_sz(net, "net/netfilter", table,
				     ARRAY_SIZE(nf_ct_frag6_sysctl_table));
	if (hdr == NULL)
		goto err_reg;

	nf_frag->nf_frag_frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	struct ctl_table *table;

	table = nf_frag->nf_frag_frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nf_frag->nf_frag_frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

#else
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	return 0;
}
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
}
#endif

static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev);

static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

static void nf_ct_frag6_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}

/* Creation primitives. */
static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
				  const struct ipv6hdr *hdr, int iif)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = user,
		.iif = iif,
	};
	struct inet_frag_queue *q;

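	/* inet_frag_find() looks the queue up by key and creates it on
	 * demand; it returns the queue with a reference held, which the
	 * caller drops via inet_frag_put() when done.
	 */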
	q = inet_frag_find(nf_frag->fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
			     const struct frag_hdr *fhdr, int nhoff)
{
	unsigned int payload_len;
	struct net_device *dev;
	struct sk_buff *prev;
	int offset, end, err;
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE) {
		pr_debug("Already completed\n");
		goto err;
	}

	payload_len = ntohs(ipv6_hdr(skb)->payload_len);

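	/* "offset" is this fragment's position within the original payload;
	 * "end" adds the number of payload octets that follow the fragment
	 * header in this skb.
	 */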
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (payload_len -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -EINVAL;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

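	/* For CHECKSUM_COMPLETE, subtract the checksum of everything up to
	 * and including the fragment header, since only the data behind it
	 * gets queued.
	 */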
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			inet_frag_kill(&fq->q);
			return -EPROTO;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}

	/* Note: skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Make sure the compiler won't play silly aliasing games. */
	barrier();

	prev = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err) {
		if (err == IPFRAG_DUP) {
			/* No error for duplicates, pretend they got queued. */
			kfree_skb_reason(skb, SKB_DROP_REASON_DUP_FRAG);
			return -EINPROGRESS;
		}
		goto insert_error;
	}

	if (dev)
		fq->iif = dev->ifindex;

	fq->q.stamp = skb->tstamp;
	fq->q.mono_delivery_time = skb->mono_delivery_time;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	if (payload_len > fq->q.max_size)
		fq->q.max_size = payload_len;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

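	/* First and last fragments are in and no octets are missing:
	 * reassemble now. The dst entry is parked across reassembly and
	 * restored afterwards.
	 */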
	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = nf_ct_frag6_reasm(fq, skb, prev, dev);
		skb->_skb_refdst = orefdst;

		/* After the queue has assumed skb ownership, only 0 or
		 * -EINPROGRESS must be returned.
		 */
		return err ? -EINPROGRESS : 0;
	}

	skb_dst_drop(skb);
	skb_orphan(skb);
	return -EINPROGRESS;

insert_error:
	inet_frag_kill(&fq->q);
err:
	skb_dst_drop(skb);
	return -EINVAL;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with a locked fq, and the caller must check that the
 *	queue is eligible for reassembly, i.e. it is not COMPLETE, both the
 *	first and the last fragments have arrived, and all the bits are here.
 */
static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev)
{
	void *reasm_data;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto err;

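	/* inet_frag_reasm_prepare() turns skb into the head of the
	 * reassembled packet and hands back the remaining fragment list
	 * for inet_frag_reasm_finish().
	 */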
	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto err;

	payload_len = ((skb->data - skb_network_header(skb)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN) {
		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
				    payload_len);
		goto err;
	}

	/* We have to remove the fragment header from the datagram and to
	 * relocate the header in order to calculate the ICV correctly.
	 */
	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);

	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

	skb->ignore_df = 1;
	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold the redundant checksum back. 8) */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_partial(skb_network_header(skb),
					 skb_network_header_len(skb),
					 skb->csum);

	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;

	return 0;

err:
	inet_frag_kill(&fq->q);
	return -EINVAL;
}

/*
 *	Find the header just before the Fragment Header.
 *
 *	On success, returns 0 and sets ...
 *	(*prevhdrp): the value of the "Next Header" field in the header
 *		     just before the Fragment Header.
 *	(*prevhoff): the offset of the "Next Header" field in the header
 *		     just before the Fragment Header.
 *	(*fhoff)   : the offset of the Fragment Header.
 *
 *	Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

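	/* Walk the extension header chain until the Fragment Header,
	 * remembering where the previous header's Next Header field lives.
	 */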
	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr)) {
			return -1;
		}
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = ipv6_authlen(&hdr);
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
	u16 savethdr = skb->transport_header;
	u8 nexthdr = NEXTHDR_FRAGMENT;
	int fhoff, nhoff, ret;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;
	u8 prevhdr;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return 0;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return 0;

	/* Discard the first fragment if it does not include all headers
	 * RFC 8200, Section 4.5
	 */
	if (ipv6frag_thdr_truncated(skb, fhoff, &nexthdr)) {
		pr_debug("Drop incomplete fragment\n");
		return 0;
	}

	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
		return -ENOMEM;

	skb_set_transport_header(skb, fhoff);
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	fq = fq_find(net, fhdr->identification, user, hdr,
		     skb->dev ? skb->dev->ifindex : 0);
	if (fq == NULL) {
		pr_debug("Can't find and can't create new queue\n");
		return -ENOMEM;
	}

	spin_lock_bh(&fq->q.lock);

	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
	if (ret == -EPROTO) {
		skb->transport_header = savethdr;
		ret = 0;
	}

	spin_unlock_bh(&fq->q.lock);
	inet_frag_put(&fq->q);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);

static int nf_ct_net_init(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	int res;

	res = fqdir_init(&nf_frag->fqdir, &nf_frags, net);
	if (res < 0)
		return res;

	nf_frag->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	nf_frag->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	nf_frag->fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = nf_ct_frag6_sysctl_register(net);
	if (res < 0)
		fqdir_exit(nf_frag->fqdir);
	return res;
}

static void nf_ct_net_pre_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	fqdir_pre_exit(nf_frag->fqdir);
}

static void nf_ct_net_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	nf_ct_frags6_sysctl_unregister(net);
	fqdir_exit(nf_frag->fqdir);
}

static struct pernet_operations nf_ct_net_ops = {
	.init		= nf_ct_net_init,
	.pre_exit	= nf_ct_net_pre_exit,
	.exit		= nf_ct_net_exit,
	.id		= &nf_frag_pernet_id,
	.size		= sizeof(struct nft_ct_frag6_pernet),
};

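/* Queues are keyed and hashed on {id, saddr, daddr, user, iif}; the
 * ip6frag_* helpers are shared with the regular IPv6 reassembly path.
 */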
static const struct rhashtable_params nfct_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};

int nf_ct_frag6_init(void)
{
	int ret = 0;

	nf_frags.constructor = ip6frag_init;
	nf_frags.destructor = NULL;
	nf_frags.qsize = sizeof(struct frag_queue);
	nf_frags.frag_expire = nf_ct_frag6_expire;
	nf_frags.frags_cache_name = nf_frags_cache_name;
	nf_frags.rhash_params = nfct_rhash_params;
	ret = inet_frags_init(&nf_frags);
	if (ret)
		goto out;
	ret = register_pernet_subsys(&nf_ct_net_ops);
	if (ret)
		inet_frags_fini(&nf_frags);

out:
	return ret;
}

void nf_ct_frag6_cleanup(void)
{
	unregister_pernet_subsys(&nf_ct_net_ops);
	inet_frags_fini(&nf_frags);
}