/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;
static const char ip_frag_cache_name[] = "ip4-frags";

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
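/* An ipq is keyed by the (id, saddr, daddr, protocol) tuple from the IP
 * header plus the defrag "user" context (see ip4_frag_match() below);
 * fragments sharing that key are chained on q.fragments in offset order.
 */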
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

/* Map the 2-bit ECN field of the TOS byte to a single bit in the ECN
 * state mask accumulated in ipq->ecn (consumed via ip_frag_ecn_table
 * in ip_frag_reasm()).
 */
static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

int ip_frag_mem(struct net *net)
{
	return sum_frag_mem_limit(&net->ipv4.frags);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd);
}

static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
{
	const struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct ipq *qp;
	const struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user;
}

static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill an ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
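/* This runs as the fragment queue's timer callback; the unsigned long
 * argument is the embedded inet_frag_queue, and the timer is armed for
 * net->ipv4.frags.timeout jiffies (see ipv4_frags_init_net() below).
 */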
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if (!(qp->q.flags & INET_FRAG_EVICTED)) {
		struct sk_buff *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);

		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
			goto out;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/* skb has no dst, perform route lookup again */
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out_rcu_unlock;

		/* Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (qp->user == IP_DEFRAG_AF_PACKET ||
		    ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
		     (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
		     (skb_rtable(head)->rt_type != RTN_LOCAL)))
			goto out_rcu_unlock;

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(&qp->q, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add a new segment to the existing queue. */
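/* For reference, the 16-bit frag_off field packs flags and offset: e.g. a
 * value of 0x2003 has IP_MF (0x2000) set and a raw offset of 3, i.e. this
 * fragment's payload starts at byte 3 * 8 = 24 of the original datagram,
 * which is what the offset decoding at the top of this function computes.
 */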
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (!pskb_pull(skb, ihl))
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * the preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			sub_frag_mem_limit(&qp->q, free_it->truesize);
			kfree_skb(free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
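	/* prev and next were located above so that the singly-linked list
	 * stays sorted by fragment offset; a NULL next means this fragment
	 * becomes the new tail.
	 */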
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(&qp->q, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    skb->len + ihl > qp->q.max_size)
		qp->q.max_size = skb->len + ihl;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	int sum_truesize;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
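	/* Presumably the point is to leave head's own frag_list empty so
	 * that the remaining fragments can be chained onto it in the
	 * coalescing loop below.
	 */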
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&qp->q, clone->truesize);
	}

	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&qp->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = qp->q.max_size;

	iph = ip_hdr(head);
	/* max_size != 0 implies at least one fragment had IP_DF set */
	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
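/* Returns zero once the datagram is complete, in which case the caller's
 * skb has been turned into the full reassembled packet (via the head swap
 * in ip_frag_reasm()); otherwise a negative value such as -EINPROGRESS
 * while more fragments are awaited.
 */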
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
				return skb;
			if (pskb_trim_rcsum(skb, netoff + len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
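/* For non-init namespaces the template table above is duplicated and its
 * data pointers are re-aimed at that namespace's own thresholds and
 * timeout, so each netns gets independent ipfrag_* knobs.
 */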
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[0].extra2 = &init_net.ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code (tries to) account for
	 * the real memory usage, by measuring both the size of the frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! The fragment queue must be destroyed before the MSL
	 * expires. RFC 791 is wrong in proposing to prolong the timer by the
	 * TTL on each fragment arrival.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
}