/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;
static const char ip_frag_cache_name[] = "ip4-frags";

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
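
/* The reassembly code tracks each fragment's byte offset in the 48-byte
 * skb->cb scratch area via FRAG_CB().  struct inet_skb_parm must remain
 * the first member of ipfrag_skb_cb so that generic IPv4 code reading
 * the same storage through IPCB(skb) still sees a valid inet_skb_parm.
 */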

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

int ip_frag_mem(struct net *net)
{
	return sum_frag_mem_limit(&net->ipv4.frags);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd);
}

static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
{
	const struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct ipq *qp;
	const struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user;
}

static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill an ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}
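
/* Lifecycle note: the queue's expiry timer holds its own reference on
 * the ipq.  ip_expire() below runs as that timer's callback and releases
 * the reference through ipq_put() once it is done with the queue.
 */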

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if (!(qp->q.flags & INET_FRAG_EVICTED)) {
		struct sk_buff *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);

		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
			goto out;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/* skb has no dst, perform route lookup again */
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out_rcu_unlock;

		/* Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (qp->user == IP_DEFRAG_AF_PACKET ||
		    ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
		     (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
		     (skb_rtable(head)->rt_type != RTN_LOCAL)))
			goto out_rcu_unlock;

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, creating a new one if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(&qp->q, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}
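
/* Worked example of the ipfrag_max_dist heuristic above: peer->rid counts
 * fragments arriving from one source address, and qp->rid snapshots that
 * counter per queue.  If, say, 100 unrelated fragments from the same peer
 * arrive between two fragments of this datagram (end - start > 64 with
 * the default sysctl), ip_frag_too_far() declares the queue stale and
 * ip_frag_reinit() flushes it before the new fragment is queued.
 */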

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with the
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat the head of the next overlapping fragment
			 * and leave the loop.  The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by
			 * the new one, drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			sub_frag_mem_limit(&qp->q, free_it->truesize);
			kfree_skb(free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;
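
	/* At this point the range [offset, end) no longer overlaps the
	 * fragment in 'prev', and any queued fragments completely covered
	 * by the new one have been freed, so the skb can simply be linked
	 * between 'prev' and 'next' below, keeping the list sorted by
	 * offset.
	 */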
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(&qp->q, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    skb->len + ihl > qp->q.max_size)
		qp->q.max_size = skb->len + ihl;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	int sum_truesize;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;
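
	/* From here on, head's geometry is edited in place (frag_list,
	 * len and data_len below), which is only safe because
	 * skb_unclone() above guaranteed that its shared info is not
	 * shared with a clone.
	 */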
	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (clone == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&qp->q, clone->truesize);
	}

	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&qp->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = qp->q.max_size;

	iph = ip_hdr(head);
	/* max_size != 0 implies at least one fragment had IP_DF set */
	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
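
/* ip_defrag() below is the entry point used, for example, by
 * ip_local_deliver() and the netfilter defrag hooks: a fragment is
 * either absorbed into a queue (-EINPROGRESS), dropped with an error,
 * or, when it completes the datagram, reassembled in place so that the
 * caller's skb carries the whole packet and 0 is returned.
 */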

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user);
	if (qp != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, iph.ihl*4))
				return skb;
			if (pskb_trim_rcsum(skb, len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[0].extra2 = &init_net.ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
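
/* Each non-init namespace registers a kmemdup()'d copy of the table so
 * that the .data and .extra pointers can be retargeted at that
 * namespace's own thresholds; the copy is freed again in the
 * unregister path below.
 */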

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code (tries to) account for
	 * the real memory usage, by measuring both the size of the frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! The fragment queue must be destroyed before the
	 * MSL expires. RFC 791 is wrong in proposing to prolong the timer
	 * by the TTL on each fragment arrival.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
}
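
/* ipfrag_init() runs once during boot (from inet_init()); everything
 * IPv4-specific about reassembly is supplied to the shared inet_frags
 * engine through the callbacks installed above.
 */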