/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);
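/*
 * A datagram under reassembly is identified by the tuple
 * (fragment ID, source address, destination address).  The hash below
 * is an open-coded Jenkins-style mix of that tuple with ip6_frags.rnd,
 * a random value the inet_frag core recomputes periodically (see the
 * ip6frag_secret_interval sysctl), which makes the bucket a given flow
 * lands in hard to predict from outside the box.
 */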
/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill an fq entry.  It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}
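/*
 * Per-queue timer callback, armed by the inet_frag core for
 * net->ipv6.frags.timeout jiffies (the ip6frag_time sysctl).  If the
 * datagram has not been fully reassembled by then, the queue is killed,
 * the timeout and failure counters are bumped and, provided the first
 * fragment has arrived, an ICMPv6 "fragment reassembly time exceeded"
 * error is sent back to the source.
 */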
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	dev = dev_get_by_index(net, fq->iif);
	if (!dev)
		goto out;

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out;

	/*
	 * Use the device on which the LAST ARRIVED segment was received
	 * as the source device.  Do not use the fq->dev pointer directly;
	 * the device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
	struct inet6_dev *idev)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		goto oom;

	return container_of(q, struct frag_queue, q);

oom:
	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS);
	return NULL;
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb->dst->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	/* The 13-bit fragment offset field counts 8-octet units; the low
	 * three bits of frag_off hold the reserved and M flags, so masking
	 * them off yields the offset in bytes.
	 */
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * the preceding fragment and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(fq->q.net, free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(net, ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	Returns -1 on any failure and 1 when the fragments have been
 *	successfully glued together into the head skb.
 *
 *	It is called with the fq locked, and the caller must have checked
 *	that the queue is eligible for reassembly, i.e. it is not COMPLETE,
 *	the last and the first fragments have arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the header in order to calculate the ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(dev_net(dev),
			 __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(dev_net(dev),
			 __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
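/*
 * Receive handler registered below for IPPROTO_FRAGMENT.  Fragments are
 * queued per (id, saddr, daddr) tuple; once the whole datagram has
 * arrived, ip6_frag_queue() calls ip6_frag_reasm() and a positive return
 * value lets the IPv6 input path resubmit the reassembled packet,
 * continuing at the next header recorded in IP6CB(skb)->nhoff.
 * Otherwise the skb has been consumed and -1 is returned.
 */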
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dst->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame: the fragment offset is zero
		 * and the M (more fragments) bit is clear. */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb->dst));

	if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
			  ip6_dst_idev(skb->dst))) != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.ctl_name	= NET_IPV6_IP6FRAG_HIGH_THRESH,
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_LOW_THRESH,
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_TIME,
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.ctl_name	= NET_IPV6_IP6FRAG_SECRET_INTERVAL,
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies
	},
	{ }
};

static int ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (net != &init_net) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (net != &init_net)
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	kfree(table);
}
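/*
 * ip6frag_high_thresh, ip6frag_low_thresh and ip6frag_time above are
 * registered per network namespace; ip6frag_secret_interval below is
 * global and registered only once, via register_net_sysctl_rotable().
 */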
static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = 256 * 1024;
	net->ipv6.frags.low_thresh = 192 * 1024;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}