1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * The IP fragmentation functionality. 7 * 8 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> 9 * Alan Cox <alan@lxorguk.ukuu.org.uk> 10 * 11 * Fixes: 12 * Alan Cox : Split from ip.c , see ip_input.c for history. 13 * David S. Miller : Begin massive cleanup... 14 * Andi Kleen : Add sysctls. 15 * xxxx : Overlapfrag bug. 16 * Ultima : ip_expire() kernel panic. 17 * Bill Hawes : Frag accounting and evictor fixes. 18 * John McDonald : 0 length frag bug. 19 * Alexey Kuznetsov: SMP races, threading, cleanup. 20 * Patrick McHardy : LRU queue of frag heads for evictor. 21 */ 22 23 #define pr_fmt(fmt) "IPv4: " fmt 24 25 #include <linux/compiler.h> 26 #include <linux/module.h> 27 #include <linux/types.h> 28 #include <linux/mm.h> 29 #include <linux/jiffies.h> 30 #include <linux/skbuff.h> 31 #include <linux/list.h> 32 #include <linux/ip.h> 33 #include <linux/icmp.h> 34 #include <linux/netdevice.h> 35 #include <linux/jhash.h> 36 #include <linux/random.h> 37 #include <linux/slab.h> 38 #include <net/route.h> 39 #include <net/dst.h> 40 #include <net/sock.h> 41 #include <net/ip.h> 42 #include <net/icmp.h> 43 #include <net/checksum.h> 44 #include <net/inetpeer.h> 45 #include <net/inet_frag.h> 46 #include <linux/tcp.h> 47 #include <linux/udp.h> 48 #include <linux/inet.h> 49 #include <linux/netfilter_ipv4.h> 50 #include <net/inet_ecn.h> 51 52 /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6 53 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c 54 * as well. Or notify me, at least. --ANK 55 */ 56 57 static int sysctl_ipfrag_max_dist __read_mostly = 64; 58 static const char ip_frag_cache_name[] = "ip4-frags"; 59 60 struct ipfrag_skb_cb 61 { 62 struct inet_skb_parm h; 63 int offset; 64 }; 65 66 #define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) 67 68 /* Describe an entry in the "incomplete datagrams" queue. */ 69 struct ipq { 70 struct inet_frag_queue q; 71 72 u32 user; 73 __be32 saddr; 74 __be32 daddr; 75 __be16 id; 76 u8 protocol; 77 u8 ecn; /* RFC3168 support */ 78 u16 max_df_size; /* largest frag with DF set seen */ 79 int iif; 80 unsigned int rid; 81 struct inet_peer *peer; 82 }; 83 84 static u8 ip4_frag_ecn(u8 tos) 85 { 86 return 1 << (tos & INET_ECN_MASK); 87 } 88 89 static struct inet_frags ip4_frags; 90 91 int ip_frag_mem(struct net *net) 92 { 93 return sum_frag_mem_limit(&net->ipv4.frags); 94 } 95 96 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, 97 struct net_device *dev); 98 99 struct ip4_create_arg { 100 struct iphdr *iph; 101 u32 user; 102 }; 103 104 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot) 105 { 106 net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd)); 107 return jhash_3words((__force u32)id << 16 | prot, 108 (__force u32)saddr, (__force u32)daddr, 109 ip4_frags.rnd); 110 } 111 112 static unsigned int ip4_hashfn(const struct inet_frag_queue *q) 113 { 114 const struct ipq *ipq; 115 116 ipq = container_of(q, struct ipq, q); 117 return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol); 118 } 119 120 static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a) 121 { 122 const struct ipq *qp; 123 const struct ip4_create_arg *arg = a; 124 125 qp = container_of(q, struct ipq, q); 126 return qp->id == arg->iph->id && 127 qp->saddr == arg->iph->saddr && 128 qp->daddr == arg->iph->daddr && 129 qp->protocol == arg->iph->protocol && 130 qp->user == arg->user; 131 } 132 133 static void ip4_frag_init(struct inet_frag_queue *q, const void *a) 134 { 135 struct ipq *qp = container_of(q, struct ipq, q); 136 struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4, 137 frags); 138 struct net *net = container_of(ipv4, struct net, ipv4); 139 140 const struct ip4_create_arg *arg = a; 141 142 qp->protocol = arg->iph->protocol; 143 qp->id = arg->iph->id; 144 qp->ecn = ip4_frag_ecn(arg->iph->tos); 145 qp->saddr = arg->iph->saddr; 146 qp->daddr = arg->iph->daddr; 147 qp->user = arg->user; 148 qp->peer = sysctl_ipfrag_max_dist ? 149 inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL; 150 } 151 152 static void ip4_frag_free(struct inet_frag_queue *q) 153 { 154 struct ipq *qp; 155 156 qp = container_of(q, struct ipq, q); 157 if (qp->peer) 158 inet_putpeer(qp->peer); 159 } 160 161 162 /* Destruction primitives. */ 163 164 static void ipq_put(struct ipq *ipq) 165 { 166 inet_frag_put(&ipq->q, &ip4_frags); 167 } 168 169 /* Kill ipq entry. It is not destroyed immediately, 170 * because caller (and someone more) holds reference count. 171 */ 172 static void ipq_kill(struct ipq *ipq) 173 { 174 inet_frag_kill(&ipq->q, &ip4_frags); 175 } 176 177 static bool frag_expire_skip_icmp(u32 user) 178 { 179 return user == IP_DEFRAG_AF_PACKET || 180 ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN, 181 __IP_DEFRAG_CONNTRACK_IN_END) || 182 ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN, 183 __IP_DEFRAG_CONNTRACK_BRIDGE_IN); 184 } 185 186 /* 187 * Oops, a fragment queue timed out. Kill it and send an ICMP reply. 188 */ 189 static void ip_expire(unsigned long arg) 190 { 191 struct ipq *qp; 192 struct net *net; 193 194 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); 195 net = container_of(qp->q.net, struct net, ipv4.frags); 196 197 spin_lock(&qp->q.lock); 198 199 if (qp->q.flags & INET_FRAG_COMPLETE) 200 goto out; 201 202 ipq_kill(qp); 203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 204 205 if (!inet_frag_evicting(&qp->q)) { 206 struct sk_buff *head = qp->q.fragments; 207 const struct iphdr *iph; 208 int err; 209 210 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT); 211 212 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) 213 goto out; 214 215 rcu_read_lock(); 216 head->dev = dev_get_by_index_rcu(net, qp->iif); 217 if (!head->dev) 218 goto out_rcu_unlock; 219 220 /* skb has no dst, perform route lookup again */ 221 iph = ip_hdr(head); 222 err = ip_route_input_noref(head, iph->daddr, iph->saddr, 223 iph->tos, head->dev); 224 if (err) 225 goto out_rcu_unlock; 226 227 /* Only an end host needs to send an ICMP 228 * "Fragment Reassembly Timeout" message, per RFC792. 229 */ 230 if (frag_expire_skip_icmp(qp->user) && 231 (skb_rtable(head)->rt_type != RTN_LOCAL)) 232 goto out_rcu_unlock; 233 234 /* Send an ICMP "Fragment Reassembly Timeout" message. */ 235 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 236 out_rcu_unlock: 237 rcu_read_unlock(); 238 } 239 out: 240 spin_unlock(&qp->q.lock); 241 ipq_put(qp); 242 } 243 244 /* Find the correct entry in the "incomplete datagrams" queue for 245 * this IP datagram, and create new one, if nothing is found. 246 */ 247 static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) 248 { 249 struct inet_frag_queue *q; 250 struct ip4_create_arg arg; 251 unsigned int hash; 252 253 arg.iph = iph; 254 arg.user = user; 255 256 hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); 257 258 q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); 259 if (IS_ERR_OR_NULL(q)) { 260 inet_frag_maybe_warn_overflow(q, pr_fmt()); 261 return NULL; 262 } 263 return container_of(q, struct ipq, q); 264 } 265 266 /* Is the fragment too far ahead to be part of ipq? */ 267 static int ip_frag_too_far(struct ipq *qp) 268 { 269 struct inet_peer *peer = qp->peer; 270 unsigned int max = sysctl_ipfrag_max_dist; 271 unsigned int start, end; 272 273 int rc; 274 275 if (!peer || !max) 276 return 0; 277 278 start = qp->rid; 279 end = atomic_inc_return(&peer->rid); 280 qp->rid = end; 281 282 rc = qp->q.fragments && (end - start) > max; 283 284 if (rc) { 285 struct net *net; 286 287 net = container_of(qp->q.net, struct net, ipv4.frags); 288 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 289 } 290 291 return rc; 292 } 293 294 static int ip_frag_reinit(struct ipq *qp) 295 { 296 struct sk_buff *fp; 297 unsigned int sum_truesize = 0; 298 299 if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) { 300 atomic_inc(&qp->q.refcnt); 301 return -ETIMEDOUT; 302 } 303 304 fp = qp->q.fragments; 305 do { 306 struct sk_buff *xp = fp->next; 307 308 sum_truesize += fp->truesize; 309 kfree_skb(fp); 310 fp = xp; 311 } while (fp); 312 sub_frag_mem_limit(qp->q.net, sum_truesize); 313 314 qp->q.flags = 0; 315 qp->q.len = 0; 316 qp->q.meat = 0; 317 qp->q.fragments = NULL; 318 qp->q.fragments_tail = NULL; 319 qp->iif = 0; 320 qp->ecn = 0; 321 322 return 0; 323 } 324 325 /* Add new segment to existing queue. */ 326 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) 327 { 328 struct sk_buff *prev, *next; 329 struct net_device *dev; 330 unsigned int fragsize; 331 int flags, offset; 332 int ihl, end; 333 int err = -ENOENT; 334 u8 ecn; 335 336 if (qp->q.flags & INET_FRAG_COMPLETE) 337 goto err; 338 339 if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && 340 unlikely(ip_frag_too_far(qp)) && 341 unlikely(err = ip_frag_reinit(qp))) { 342 ipq_kill(qp); 343 goto err; 344 } 345 346 ecn = ip4_frag_ecn(ip_hdr(skb)->tos); 347 offset = ntohs(ip_hdr(skb)->frag_off); 348 flags = offset & ~IP_OFFSET; 349 offset &= IP_OFFSET; 350 offset <<= 3; /* offset is in 8-byte chunks */ 351 ihl = ip_hdrlen(skb); 352 353 /* Determine the position of this fragment. */ 354 end = offset + skb->len - skb_network_offset(skb) - ihl; 355 err = -EINVAL; 356 357 /* Is this the final fragment? */ 358 if ((flags & IP_MF) == 0) { 359 /* If we already have some bits beyond end 360 * or have different end, the segment is corrupted. 361 */ 362 if (end < qp->q.len || 363 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) 364 goto err; 365 qp->q.flags |= INET_FRAG_LAST_IN; 366 qp->q.len = end; 367 } else { 368 if (end&7) { 369 end &= ~7; 370 if (skb->ip_summed != CHECKSUM_UNNECESSARY) 371 skb->ip_summed = CHECKSUM_NONE; 372 } 373 if (end > qp->q.len) { 374 /* Some bits beyond end -> corruption. */ 375 if (qp->q.flags & INET_FRAG_LAST_IN) 376 goto err; 377 qp->q.len = end; 378 } 379 } 380 if (end == offset) 381 goto err; 382 383 err = -ENOMEM; 384 if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) 385 goto err; 386 387 err = pskb_trim_rcsum(skb, end - offset); 388 if (err) 389 goto err; 390 391 /* Find out which fragments are in front and at the back of us 392 * in the chain of fragments so far. We must know where to put 393 * this fragment, right? 394 */ 395 prev = qp->q.fragments_tail; 396 if (!prev || FRAG_CB(prev)->offset < offset) { 397 next = NULL; 398 goto found; 399 } 400 prev = NULL; 401 for (next = qp->q.fragments; next != NULL; next = next->next) { 402 if (FRAG_CB(next)->offset >= offset) 403 break; /* bingo! */ 404 prev = next; 405 } 406 407 found: 408 /* We found where to put this one. Check for overlap with 409 * preceding fragment, and, if needed, align things so that 410 * any overlaps are eliminated. 411 */ 412 if (prev) { 413 int i = (FRAG_CB(prev)->offset + prev->len) - offset; 414 415 if (i > 0) { 416 offset += i; 417 err = -EINVAL; 418 if (end <= offset) 419 goto err; 420 err = -ENOMEM; 421 if (!pskb_pull(skb, i)) 422 goto err; 423 if (skb->ip_summed != CHECKSUM_UNNECESSARY) 424 skb->ip_summed = CHECKSUM_NONE; 425 } 426 } 427 428 err = -ENOMEM; 429 430 while (next && FRAG_CB(next)->offset < end) { 431 int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */ 432 433 if (i < next->len) { 434 /* Eat head of the next overlapped fragment 435 * and leave the loop. The next ones cannot overlap. 436 */ 437 if (!pskb_pull(next, i)) 438 goto err; 439 FRAG_CB(next)->offset += i; 440 qp->q.meat -= i; 441 if (next->ip_summed != CHECKSUM_UNNECESSARY) 442 next->ip_summed = CHECKSUM_NONE; 443 break; 444 } else { 445 struct sk_buff *free_it = next; 446 447 /* Old fragment is completely overridden with 448 * new one drop it. 449 */ 450 next = next->next; 451 452 if (prev) 453 prev->next = next; 454 else 455 qp->q.fragments = next; 456 457 qp->q.meat -= free_it->len; 458 sub_frag_mem_limit(qp->q.net, free_it->truesize); 459 kfree_skb(free_it); 460 } 461 } 462 463 FRAG_CB(skb)->offset = offset; 464 465 /* Insert this fragment in the chain of fragments. */ 466 skb->next = next; 467 if (!next) 468 qp->q.fragments_tail = skb; 469 if (prev) 470 prev->next = skb; 471 else 472 qp->q.fragments = skb; 473 474 dev = skb->dev; 475 if (dev) { 476 qp->iif = dev->ifindex; 477 skb->dev = NULL; 478 } 479 qp->q.stamp = skb->tstamp; 480 qp->q.meat += skb->len; 481 qp->ecn |= ecn; 482 add_frag_mem_limit(qp->q.net, skb->truesize); 483 if (offset == 0) 484 qp->q.flags |= INET_FRAG_FIRST_IN; 485 486 fragsize = skb->len + ihl; 487 488 if (fragsize > qp->q.max_size) 489 qp->q.max_size = fragsize; 490 491 if (ip_hdr(skb)->frag_off & htons(IP_DF) && 492 fragsize > qp->max_df_size) 493 qp->max_df_size = fragsize; 494 495 if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 496 qp->q.meat == qp->q.len) { 497 unsigned long orefdst = skb->_skb_refdst; 498 499 skb->_skb_refdst = 0UL; 500 err = ip_frag_reasm(qp, prev, dev); 501 skb->_skb_refdst = orefdst; 502 return err; 503 } 504 505 skb_dst_drop(skb); 506 return -EINPROGRESS; 507 508 err: 509 kfree_skb(skb); 510 return err; 511 } 512 513 514 /* Build a new IP datagram from all its fragments. */ 515 516 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, 517 struct net_device *dev) 518 { 519 struct net *net = container_of(qp->q.net, struct net, ipv4.frags); 520 struct iphdr *iph; 521 struct sk_buff *fp, *head = qp->q.fragments; 522 int len; 523 int ihlen; 524 int err; 525 u8 ecn; 526 527 ipq_kill(qp); 528 529 ecn = ip_frag_ecn_table[qp->ecn]; 530 if (unlikely(ecn == 0xff)) { 531 err = -EINVAL; 532 goto out_fail; 533 } 534 /* Make the one we just received the head. */ 535 if (prev) { 536 head = prev->next; 537 fp = skb_clone(head, GFP_ATOMIC); 538 if (!fp) 539 goto out_nomem; 540 541 fp->next = head->next; 542 if (!fp->next) 543 qp->q.fragments_tail = fp; 544 prev->next = fp; 545 546 skb_morph(head, qp->q.fragments); 547 head->next = qp->q.fragments->next; 548 549 consume_skb(qp->q.fragments); 550 qp->q.fragments = head; 551 } 552 553 WARN_ON(!head); 554 WARN_ON(FRAG_CB(head)->offset != 0); 555 556 /* Allocate a new buffer for the datagram. */ 557 ihlen = ip_hdrlen(head); 558 len = ihlen + qp->q.len; 559 560 err = -E2BIG; 561 if (len > 65535) 562 goto out_oversize; 563 564 /* Head of list must not be cloned. */ 565 if (skb_unclone(head, GFP_ATOMIC)) 566 goto out_nomem; 567 568 /* If the first fragment is fragmented itself, we split 569 * it to two chunks: the first with data and paged part 570 * and the second, holding only fragments. */ 571 if (skb_has_frag_list(head)) { 572 struct sk_buff *clone; 573 int i, plen = 0; 574 575 clone = alloc_skb(0, GFP_ATOMIC); 576 if (!clone) 577 goto out_nomem; 578 clone->next = head->next; 579 head->next = clone; 580 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 581 skb_frag_list_init(head); 582 for (i = 0; i < skb_shinfo(head)->nr_frags; i++) 583 plen += skb_frag_size(&skb_shinfo(head)->frags[i]); 584 clone->len = clone->data_len = head->data_len - plen; 585 head->data_len -= clone->len; 586 head->len -= clone->len; 587 clone->csum = 0; 588 clone->ip_summed = head->ip_summed; 589 add_frag_mem_limit(qp->q.net, clone->truesize); 590 } 591 592 skb_shinfo(head)->frag_list = head->next; 593 skb_push(head, head->data - skb_network_header(head)); 594 595 for (fp=head->next; fp; fp = fp->next) { 596 head->data_len += fp->len; 597 head->len += fp->len; 598 if (head->ip_summed != fp->ip_summed) 599 head->ip_summed = CHECKSUM_NONE; 600 else if (head->ip_summed == CHECKSUM_COMPLETE) 601 head->csum = csum_add(head->csum, fp->csum); 602 head->truesize += fp->truesize; 603 } 604 sub_frag_mem_limit(qp->q.net, head->truesize); 605 606 head->next = NULL; 607 head->dev = dev; 608 head->tstamp = qp->q.stamp; 609 IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); 610 611 iph = ip_hdr(head); 612 iph->tot_len = htons(len); 613 iph->tos |= ecn; 614 615 /* When we set IP_DF on a refragmented skb we must also force a 616 * call to ip_fragment to avoid forwarding a DF-skb of size s while 617 * original sender only sent fragments of size f (where f < s). 618 * 619 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest 620 * frag seen to avoid sending tiny DF-fragments in case skb was built 621 * from one very small df-fragment and one large non-df frag. 622 */ 623 if (qp->max_df_size == qp->q.max_size) { 624 IPCB(head)->flags |= IPSKB_FRAG_PMTU; 625 iph->frag_off = htons(IP_DF); 626 } else { 627 iph->frag_off = 0; 628 } 629 630 ip_send_check(iph); 631 632 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 633 qp->q.fragments = NULL; 634 qp->q.fragments_tail = NULL; 635 return 0; 636 637 out_nomem: 638 net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); 639 err = -ENOMEM; 640 goto out_fail; 641 out_oversize: 642 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); 643 out_fail: 644 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 645 return err; 646 } 647 648 /* Process an incoming IP datagram fragment. */ 649 int ip_defrag(struct sk_buff *skb, u32 user) 650 { 651 struct ipq *qp; 652 struct net *net; 653 654 net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev); 655 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); 656 657 /* Lookup (or create) queue header */ 658 qp = ip_find(net, ip_hdr(skb), user); 659 if (qp) { 660 int ret; 661 662 spin_lock(&qp->q.lock); 663 664 ret = ip_frag_queue(qp, skb); 665 666 spin_unlock(&qp->q.lock); 667 ipq_put(qp); 668 return ret; 669 } 670 671 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 672 kfree_skb(skb); 673 return -ENOMEM; 674 } 675 EXPORT_SYMBOL(ip_defrag); 676 677 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) 678 { 679 struct iphdr iph; 680 int netoff; 681 u32 len; 682 683 if (skb->protocol != htons(ETH_P_IP)) 684 return skb; 685 686 netoff = skb_network_offset(skb); 687 688 if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) 689 return skb; 690 691 if (iph.ihl < 5 || iph.version != 4) 692 return skb; 693 694 len = ntohs(iph.tot_len); 695 if (skb->len < netoff + len || len < (iph.ihl * 4)) 696 return skb; 697 698 if (ip_is_fragment(&iph)) { 699 skb = skb_share_check(skb, GFP_ATOMIC); 700 if (skb) { 701 if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) 702 return skb; 703 if (pskb_trim_rcsum(skb, netoff + len)) 704 return skb; 705 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 706 if (ip_defrag(skb, user)) 707 return NULL; 708 skb_clear_hash(skb); 709 } 710 } 711 return skb; 712 } 713 EXPORT_SYMBOL(ip_check_defrag); 714 715 #ifdef CONFIG_SYSCTL 716 static int zero; 717 718 static struct ctl_table ip4_frags_ns_ctl_table[] = { 719 { 720 .procname = "ipfrag_high_thresh", 721 .data = &init_net.ipv4.frags.high_thresh, 722 .maxlen = sizeof(int), 723 .mode = 0644, 724 .proc_handler = proc_dointvec_minmax, 725 .extra1 = &init_net.ipv4.frags.low_thresh 726 }, 727 { 728 .procname = "ipfrag_low_thresh", 729 .data = &init_net.ipv4.frags.low_thresh, 730 .maxlen = sizeof(int), 731 .mode = 0644, 732 .proc_handler = proc_dointvec_minmax, 733 .extra1 = &zero, 734 .extra2 = &init_net.ipv4.frags.high_thresh 735 }, 736 { 737 .procname = "ipfrag_time", 738 .data = &init_net.ipv4.frags.timeout, 739 .maxlen = sizeof(int), 740 .mode = 0644, 741 .proc_handler = proc_dointvec_jiffies, 742 }, 743 { } 744 }; 745 746 /* secret interval has been deprecated */ 747 static int ip4_frags_secret_interval_unused; 748 static struct ctl_table ip4_frags_ctl_table[] = { 749 { 750 .procname = "ipfrag_secret_interval", 751 .data = &ip4_frags_secret_interval_unused, 752 .maxlen = sizeof(int), 753 .mode = 0644, 754 .proc_handler = proc_dointvec_jiffies, 755 }, 756 { 757 .procname = "ipfrag_max_dist", 758 .data = &sysctl_ipfrag_max_dist, 759 .maxlen = sizeof(int), 760 .mode = 0644, 761 .proc_handler = proc_dointvec_minmax, 762 .extra1 = &zero 763 }, 764 { } 765 }; 766 767 static int __net_init ip4_frags_ns_ctl_register(struct net *net) 768 { 769 struct ctl_table *table; 770 struct ctl_table_header *hdr; 771 772 table = ip4_frags_ns_ctl_table; 773 if (!net_eq(net, &init_net)) { 774 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); 775 if (!table) 776 goto err_alloc; 777 778 table[0].data = &net->ipv4.frags.high_thresh; 779 table[0].extra1 = &net->ipv4.frags.low_thresh; 780 table[0].extra2 = &init_net.ipv4.frags.high_thresh; 781 table[1].data = &net->ipv4.frags.low_thresh; 782 table[1].extra2 = &net->ipv4.frags.high_thresh; 783 table[2].data = &net->ipv4.frags.timeout; 784 785 /* Don't export sysctls to unprivileged users */ 786 if (net->user_ns != &init_user_ns) 787 table[0].procname = NULL; 788 } 789 790 hdr = register_net_sysctl(net, "net/ipv4", table); 791 if (!hdr) 792 goto err_reg; 793 794 net->ipv4.frags_hdr = hdr; 795 return 0; 796 797 err_reg: 798 if (!net_eq(net, &init_net)) 799 kfree(table); 800 err_alloc: 801 return -ENOMEM; 802 } 803 804 static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net) 805 { 806 struct ctl_table *table; 807 808 table = net->ipv4.frags_hdr->ctl_table_arg; 809 unregister_net_sysctl_table(net->ipv4.frags_hdr); 810 kfree(table); 811 } 812 813 static void __init ip4_frags_ctl_register(void) 814 { 815 register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table); 816 } 817 #else 818 static int ip4_frags_ns_ctl_register(struct net *net) 819 { 820 return 0; 821 } 822 823 static void ip4_frags_ns_ctl_unregister(struct net *net) 824 { 825 } 826 827 static void __init ip4_frags_ctl_register(void) 828 { 829 } 830 #endif 831 832 static int __net_init ipv4_frags_init_net(struct net *net) 833 { 834 /* Fragment cache limits. 835 * 836 * The fragment memory accounting code, (tries to) account for 837 * the real memory usage, by measuring both the size of frag 838 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue)) 839 * and the SKB's truesize. 840 * 841 * A 64K fragment consumes 129736 bytes (44*2944)+200 842 * (1500 truesize == 2944, sizeof(struct ipq) == 200) 843 * 844 * We will commit 4MB at one time. Should we cross that limit 845 * we will prune down to 3MB, making room for approx 8 big 64K 846 * fragments 8x128k. 847 */ 848 net->ipv4.frags.high_thresh = 4 * 1024 * 1024; 849 net->ipv4.frags.low_thresh = 3 * 1024 * 1024; 850 /* 851 * Important NOTE! Fragment queue must be destroyed before MSL expires. 852 * RFC791 is wrong proposing to prolongate timer each fragment arrival 853 * by TTL. 854 */ 855 net->ipv4.frags.timeout = IP_FRAG_TIME; 856 857 inet_frags_init_net(&net->ipv4.frags); 858 859 return ip4_frags_ns_ctl_register(net); 860 } 861 862 static void __net_exit ipv4_frags_exit_net(struct net *net) 863 { 864 ip4_frags_ns_ctl_unregister(net); 865 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); 866 } 867 868 static struct pernet_operations ip4_frags_ops = { 869 .init = ipv4_frags_init_net, 870 .exit = ipv4_frags_exit_net, 871 }; 872 873 void __init ipfrag_init(void) 874 { 875 ip4_frags_ctl_register(); 876 register_pernet_subsys(&ip4_frags_ops); 877 ip4_frags.hashfn = ip4_hashfn; 878 ip4_frags.constructor = ip4_frag_init; 879 ip4_frags.destructor = ip4_frag_free; 880 ip4_frags.skb_free = NULL; 881 ip4_frags.qsize = sizeof(struct ipq); 882 ip4_frags.match = ip4_frag_match; 883 ip4_frags.frag_expire = ip_expire; 884 ip4_frags.frags_cache_name = ip_frag_cache_name; 885 if (inet_frags_init(&ip4_frags)) 886 panic("IP: failed to allocate ip4_frags cache\n"); 887 } 888