/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI, H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * Callers should be careful not to use the hash value outside the
 * ipfrag_lock, as doing so could race with ipfrag_hash_rnd being
 * recalculated.
 */
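/*
 * The hash mixes the fragment id and both 128-bit addresses through
 * three chained jhash_3words() rounds, seeded with the periodically
 * regenerated ip6_frags.rnd, and reduces the result to a bucket index
 * in [0, INETFRAGS_HASHSZ).
 */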
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 c;

	c = jhash_3words((__force u32)saddr->s6_addr32[0],
			 (__force u32)saddr->s6_addr32[1],
			 (__force u32)saddr->s6_addr32[2],
			 rnd);

	c = jhash_3words((__force u32)saddr->s6_addr32[3],
			 (__force u32)daddr->s6_addr32[0],
			 (__force u32)daddr->s6_addr32[1],
			 c);

	c = jhash_3words((__force u32)daddr->s6_addr32[2],
			 (__force u32)daddr->s6_addr32[3],
			 (__force u32)id,
			 c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill the fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send an error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* Use the device on which the LAST ARRIVED segment was received
	 * as the source. Do not use the fq->dev pointer directly; the
	 * device might have disappeared already.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}
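/*
 * Look up the reassembly queue for (id, src, dst) in this network
 * namespace, creating one if none exists yet. On success the queue is
 * returned with a reference held; the caller drops it with fq_put().
 */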
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src,
	const struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		return NULL;

	return container_of(q, struct frag_queue, q);
}
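/*
 * Validate one fragment and link it into fq's fragment list, which is
 * kept sorted by offset. 'end' is the offset just past this fragment's
 * data; every fragment except the final one must carry a multiple of
 * 8 bytes, and per RFC 5722 any overlap discards the whole queue.
 * Returns the result of ip6_frag_reasm() once the datagram is
 * complete, -1 otherwise.
 */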
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond the end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check that the fragment length is a multiple of 8 bytes,
		 * as required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC 2460 says to always send a parameter problem
			 * in this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front of and behind us in the
	 * chain of fragments collected so far, so we know where to put
	 * this one.
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC 5722, Section 4, amended by Errata ID 3089:
	 * When reassembling an IPv6 datagram, if one or more of its
	 * constituent fragments is determined to be an overlapping
	 * fragment, the entire datagram (and any constituent fragments)
	 * MUST be silently discarded.
	 */

	/* Check for overlap with the preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with the succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
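/*
 * A worked example of the offset/end arithmetic above, assuming the
 * fragment header directly follows the 40-byte IPv6 header: with 1448
 * data bytes per fragment, the first fragment arrives with
 * payload_len = 1456 and a zero fragment offset, giving offset = 0 and
 * end = 0 + (1456 - 8) = 1448; the second carries offset = 1448, and
 * so on, until the fragment without IP6_MF finally pins fq->q.len to
 * the full payload length.
 */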
/*
 *	Check if this packet is complete.
 *
 *	Returns -1 on failure for any reason, and 1 on success, in which
 *	case the head skb holds the reassembled frame and IP6CB(head)->nhoff
 *	points at the current nexthdr field.
 *
 *	It is called with the fq locked, and the caller must check that the
 *	queue is eligible for reassembly, i.e. it is not COMPLETE, the last
 *	and the first frames have arrived, and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the header in order to calculate the ICV correctly.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n",
		       payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
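/*
 * Receive handler for IPPROTO_FRAGMENT. A fragment header with a zero
 * offset and no M bit set is simply skipped over; anything else is fed
 * to the matching reassembly queue. A return of 1 means skb now holds
 * a complete datagram ready for further input processing.
 */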
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}
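/*
 * The three ip6frag_* limits above live in per-namespace tables (the
 * init_net template is duplicated for each new netns), while
 * ip6frag_secret_interval below is global state and so is registered
 * only once, read-only outside init_net.
 */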
static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}