/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
				  struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* But use as source device on which LAST ARRIVED
	 * segment was received.
	 * And do not use the fq->dev pointer directly; the device
	 * might have already disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
	struct inet6_dev *idev)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		goto oom;

	return container_of(q, struct frag_queue, q);

oom:
	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS);
	return NULL;
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.
	 * Check for overlap with the preceding fragment, and, if needed,
	 * align things so that any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(fq->q.net, free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 when the datagram has been reassembled into the first
 *	fragment's skb, or -1 on failure for any reason.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frags(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
			  ip6_dst_idev(skb_dst(skb)))) != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = 256 * 1024;
	net->ipv6.frags.low_thresh = 192 * 1024;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}