// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI	<yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 */

#define pr_fmt(fmt) "IPv6-nf: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>

#include <net/ipv6_frag.h>

#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netns/generic.h>

static const char nf_frags_cache_name[] = "nf-frags";

static unsigned int nf_frag_pernet_id __read_mostly;
static struct inet_frags nf_frags;

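/* Look up this module's per-network-namespace state, registered below
 * through nf_ct_net_ops.id/.size.
 */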
static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
{
	return net_generic(net, nf_frag_pernet_id);
}

#ifdef CONFIG_SYSCTL

static struct ctl_table nf_ct_frag6_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_frag6_timeout",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_frag6_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "nf_conntrack_frag6_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{ }
};

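/* Register the nf_conntrack_frag6_* sysctls under net/netfilter.  Non-init
 * namespaces get a kmemdup()'d copy of the table so each netns can point
 * .data at its own fqdir and tune its limits independently.
 */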
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag;
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = nf_ct_frag6_sysctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;
	}

	nf_frag = nf_frag_pernet(net);

	table[0].data = &nf_frag->fqdir->timeout;
	table[1].data = &nf_frag->fqdir->low_thresh;
	table[1].extra2 = &nf_frag->fqdir->high_thresh;
	table[2].data = &nf_frag->fqdir->high_thresh;
	table[2].extra1 = &nf_frag->fqdir->low_thresh;

	hdr = register_net_sysctl_sz(net, "net/netfilter", table,
				     ARRAY_SIZE(nf_ct_frag6_sysctl_table));
	if (hdr == NULL)
		goto err_reg;

	nf_frag->nf_frag_frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	struct ctl_table *table;

	table = nf_frag->nf_frag_frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nf_frag->nf_frag_frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

#else
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	return 0;
}
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
}
#endif

static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev);

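/* Map the packet's two ECN bits to a single bit in a 4-bit mask.  The
 * per-fragment masks are OR'ed into fq->ecn and later folded back into a
 * single ECN codepoint via ip_frag_ecn_table[] during reassembly.
 */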
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

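/* Timer callback: the queue timed out before all fragments arrived.
 * ip6frag_expire_frag_queue() kills the queue and, where appropriate,
 * sends an ICMPv6 "fragment reassembly time exceeded" error.
 */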
static void nf_ct_frag6_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}

/* Creation primitives. */
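/* Find or create the frag queue for this (id, saddr, daddr, user, iif)
 * tuple.  The incoming interface only disambiguates link-local and
 * multicast destinations; for global addresses it is zeroed so fragments
 * arriving on different interfaces still match the same queue.
 */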
static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
				  const struct ipv6hdr *hdr, int iif)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = user,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
					     IPV6_ADDR_LINKLOCAL)))
		key.iif = 0;

	q = inet_frag_find(nf_frag->fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

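/* Queue one fragment.  Returns 0 when this fragment completed the datagram
 * and it was reassembled in place, -EINPROGRESS when the fragment was
 * queued (the skb now belongs to the queue), and a negative error when the
 * fragment was rejected.
 */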
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
			     const struct frag_hdr *fhdr, int nhoff)
{
	unsigned int payload_len;
	struct net_device *dev;
	struct sk_buff *prev;
	int offset, end, err;
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE) {
		pr_debug("Already completed\n");
		goto err;
	}

	payload_len = ntohs(ipv6_hdr(skb)->payload_len);

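	/* The fragment offset lives in the upper 13 bits of frag_off, in
	 * units of 8 octets; masking off the low three bits (reserved + MF)
	 * yields the byte offset directly.  "end" is the offset one past
	 * this fragment's payload within the original datagram.
	 */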
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (payload_len -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -EINVAL;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

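	/* A CHECKSUM_COMPLETE value covers the whole packet; subtract the
	 * part over the headers up to and including the fragment header,
	 * since those bytes are not part of the reassembled payload.
	 */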
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			inet_frag_kill(&fq->q);
			return -EPROTO;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler won't do silly aliasing games */
	barrier();

	prev = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err) {
		if (err == IPFRAG_DUP) {
			/* No error for duplicates, pretend they got queued. */
			kfree_skb_reason(skb, SKB_DROP_REASON_DUP_FRAG);
			return -EINPROGRESS;
		}
		goto insert_error;
	}

	if (dev)
		fq->iif = dev->ifindex;

	fq->q.stamp = skb->tstamp;
	fq->q.mono_delivery_time = skb->mono_delivery_time;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	if (payload_len > fq->q.max_size)
		fq->q.max_size = payload_len;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

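	/* First and last fragments are in and the byte counts match: the
	 * datagram is complete, so reassemble it now.  The caller's dst
	 * reference is parked in orefdst across the call so the reassembly
	 * path cannot drop it.
	 */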
	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = nf_ct_frag6_reasm(fq, skb, prev, dev);
		skb->_skb_refdst = orefdst;

		/* After queue has assumed skb ownership, only 0 or
		 * -EINPROGRESS must be returned.
		 */
		return err ? -EINPROGRESS : 0;
	}

	skb_dst_drop(skb);
	skb_orphan(skb);
	return -EINPROGRESS;

insert_error:
	inet_frag_kill(&fq->q);
err:
	skb_dst_drop(skb);
	return -EINVAL;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with a locked fq, and the caller must have checked that
 *	the queue is eligible for reassembly, i.e. it is not COMPLETE, the
 *	first and the last fragments have arrived, and all the bytes are here.
 */
static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev)
{
	void *reasm_data;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto err;

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto err;

	payload_len = ((skb->data - skb_network_header(skb)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN) {
		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
				    payload_len);
		goto err;
	}

	/* We have to remove the fragment header from the datagram and to
	 * relocate the header in order to calculate the ICV correctly.
	 */
	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);

	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

	skb->ignore_df = 1;
	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_partial(skb_network_header(skb),
					 skb_network_header_len(skb),
					 skb->csum);

	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;

	return 0;

err:
	inet_frag_kill(&fq->q);
	return -EINVAL;
}

/*
 *	Find the header just before the Fragment Header.
 *
 *	On success, returns 0 and sets ...
 *	(*prevhdrp): the value of the "Next Header" field in the header
 *		     just before the Fragment Header.
 *	(*prevhoff): the offset of the "Next Header" field in the header
 *		     just before the Fragment Header.
 *	(*fhoff)   : the offset of the Fragment Header.
 *
 *	Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c.
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr)) {
			return -1;
		}
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
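		/* AH encodes its length in 32-bit words minus two, while the
		 * other extension headers use 64-bit words minus one, so the
		 * two need different length calculations.
		 */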
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = ipv6_authlen(&hdr);
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

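/* Defragmentation entry point, called from the conntrack defrag hook.
 * Returns 0 if the packet needs no further work here (not fragmented, or
 * just reassembled in place), -EINPROGRESS if the skb was consumed by a
 * queue, or a negative error.
 */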
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
	u16 savethdr = skb->transport_header;
	u8 nexthdr = NEXTHDR_FRAGMENT;
	int fhoff, nhoff, ret;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;
	u8 prevhdr;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return 0;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return 0;

	/* Discard the first fragment if it does not include all headers
	 * RFC 8200, Section 4.5
	 */
	if (ipv6frag_thdr_truncated(skb, fhoff, &nexthdr)) {
		pr_debug("Drop incomplete fragment\n");
		return 0;
	}

	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
		return -ENOMEM;

	skb_set_transport_header(skb, fhoff);
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	fq = fq_find(net, fhdr->identification, user, hdr,
		     skb->dev ? skb->dev->ifindex : 0);
	if (fq == NULL) {
		pr_debug("Can't find and can't create new queue\n");
		return -ENOMEM;
	}

	spin_lock_bh(&fq->q.lock);

	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
	if (ret == -EPROTO) {
		skb->transport_header = savethdr;
		ret = 0;
	}

	spin_unlock_bh(&fq->q.lock);
	inet_frag_put(&fq->q);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);

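/* Per-netns setup: allocate the fqdir, apply the default thresholds and
 * timeout, and register the sysctls.  On sysctl failure the fqdir is torn
 * down again, so namespace setup is all-or-nothing.
 */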
static int nf_ct_net_init(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	int res;

	res = fqdir_init(&nf_frag->fqdir, &nf_frags, net);
	if (res < 0)
		return res;

	nf_frag->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	nf_frag->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	nf_frag->fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = nf_ct_frag6_sysctl_register(net);
	if (res < 0)
		fqdir_exit(nf_frag->fqdir);
	return res;
}

static void nf_ct_net_pre_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	fqdir_pre_exit(nf_frag->fqdir);
}

static void nf_ct_net_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	nf_ct_frags6_sysctl_unregister(net);
	fqdir_exit(nf_frag->fqdir);
}

static struct pernet_operations nf_ct_net_ops = {
	.init		= nf_ct_net_init,
	.pre_exit	= nf_ct_net_pre_exit,
	.exit		= nf_ct_net_exit,
	.id		= &nf_frag_pernet_id,
	.size		= sizeof(struct nft_ct_frag6_pernet),
};

static const struct rhashtable_params nfct_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};

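/* Module init: set up the shared inet_frags descriptor, then register the
 * per-netns operations.  Ordering matters: inet_frags_init() must succeed
 * before any namespace can run fqdir_init() against nf_frags.
 */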
int nf_ct_frag6_init(void)
{
	int ret = 0;

	nf_frags.constructor = ip6frag_init;
	nf_frags.destructor = NULL;
	nf_frags.qsize = sizeof(struct frag_queue);
	nf_frags.frag_expire = nf_ct_frag6_expire;
	nf_frags.frags_cache_name = nf_frags_cache_name;
	nf_frags.rhash_params = nfct_rhash_params;
	ret = inet_frags_init(&nf_frags);
	if (ret)
		goto out;
	ret = register_pernet_subsys(&nf_ct_net_ops);
	if (ret)
		inet_frags_fini(&nf_frags);

out:
	return ret;
}

void nf_ct_frag6_cleanup(void)
{
	unregister_pernet_subsys(&nf_ct_net_ops);
	inet_frags_fini(&nf_frags);
}