// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram.
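 *
 * The checksum covers only the header (iph->ihl 32-bit words), so any
 * caller that rewrites a header field must recompute it afterwards.
 * Minimal, illustrative (hypothetical) caller:
 *
 *	iph->ttl--;
 *	ip_send_check(iph);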
 */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	if (!skb->mark)
		skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever.
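	 * If the skb lacks link-layer headroom and the device builds a
	 * hard header, reallocate with LL_RESERVED_SPACE(dev) of headroom
	 * rather than trusting the caller to have reserved enough.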
	 */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 * - Forwarding of a TCP GRO skb, when DF flag is not set.
	 * - Forwarding of an skb that arrived on a virtualization interface
	 *   (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *   stack.
	 * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *   interface with a smaller MTU.
	 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *   bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *   insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}

static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
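	/* any other verdict from the cgroup egress BPF program means drop */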
	default:
		kfree_skb(skb);
		return ret;
	}
}

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		/* fall through */
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb(skb);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

/* Note: skb->sk can be different from sk, in case of tunnels */
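/* For instance, a tunnel may transmit on its own socket while skb->sk
 * still points at the socket that created the packet. Illustrative
 * (hypothetical) tunnel-style call:
 *
 *	err = __ip_queue_xmit(tunnel_sk, skb, &fl, tos);
 */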
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ?
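	 * (They differ in the tunnel case noted above __ip_queue_xmit;
	 * using sk keeps the transmitting socket's priority and mark.)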
	 */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
				     struct ip_fraglist_iter *iter)
{
	struct sk_buff *to = iter->frag;

	/* Copy the flags to each fragment.
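	 * (IPCB flags such as IPSKB_FORWARDED were set for the original
	 * packet; every fragment must carry the same control-block state
	 * so later hooks treat them alike.)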
	 */
	IPCB(to)->flags = IPCB(skb)->flags;

	if (iter->offset == 0)
		ip_options_fragment(to);
}

void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);

void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag, struct ip_frag_state *state)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
		state->iph->frag_off |= htons(IP_DF);

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left) {
		len &= ~7;
	}

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
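	 *	The fragment offset field counts 8-byte units, hence the
	 *	">> 3" below (and the "len &= ~7" alignment above).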
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons((state->offset >> 3));

	/*
	 *	Added AC : If we are fragmenting a fragment that's not the
	 *		   last fragment then keep the MF bit set on each
	 *		   fragment
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	ktime_t tstamp = skb->tstamp;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
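			 * (ip_fraglist_prepare() pushes a copy of the IP
			 * header onto the queued fragment and fixes up
			 * tot_len, frag_off and the header checksum.)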
			 */
			if (iter.frag) {
				ip_fraglist_ipcb_prepare(skb, &iter);
				ip_fraglist_prepare(skb, &iter);
			}

			skb->tstamp = tstamp;
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag, &state);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb2->tstamp = tstamp;
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
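	/* Fragment payloads must be multiples of 8 bytes, except for the
	 * last fragment, so round the usable space down to a multiple of
	 * 8 past the header. Worked example, assuming no IP options:
	 * mtu = 1500, fragheaderlen = 20, so
	 * maxfraglen = ((1500 - 20) & ~7) + 20 = 1500.
	 */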
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later on.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;

	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
		if (!uarg)
			return -ENOBUFS;
		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
		if (rt->dst.dev->features & NETIF_F_SG &&
		    csummode == CHECKSUM_PARTIAL) {
			paged = true;
		} else {
			uarg->zerocopy = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);
		}
	}

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb,
	 * each segment of which is an IP fragment ready for sending to the
	 * network once an appropriate IP header has been added.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else if (!paged)
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen + hh_len + 15,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
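			 *	(The network header lands at offset
			 *	exthdrlen; payload copying starts after
			 *	fragheaderlen + exthdrlen.)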
			 */
			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (!uarg || !uarg->zerocopy) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	if (uarg)
		sock_zerocopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;

	/*
	 * setup for corking.
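	 * The cork pins the route, a private copy of the IP options and
	 * the sizing parameters, so that successive ip_append_data() calls
	 * keep extending the same datagram.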
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}

	/*
	 * We steal a reference to this route; the caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;

	cork->gso_size = ipc->gso_size;
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->mark = ipc->sockc.mark;
	cork->priority = ipc->priority;
	cork->transmit_time = ipc->sockc.transmit_time;
	cork->tx_flags = 0;
	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}

ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;

	while (size > 0) {
		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;

		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		refcount_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
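 *
 *	Illustrative (hypothetical) corked-UDP style usage:
 *
 *		ip_append_data(sk, &fl4, ip_generic_getfrag, msg, len,
 *			       sizeof(struct udphdr), &ipc, &rt, MSG_MORE);
 *		...
 *		ip_push_pending_frames(sk, &fl4);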
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * the frame generated here to be fragmented. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
	skb->mark = cork->mark;
	skb->tstamp = cork->transmit_time;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

/*
 *	Throw away all pending data on the socket.
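 *	(Used on error paths, for example when appending data to a corked
 *	datagram fails part way through.)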
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags)
{
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->flags = 0;
	cork->addr = 0;
	cork->opt = NULL;
	err = ip_setup_cork(sk, cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
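 *
 *	The caller passes in a private (typically per-cpu) control socket;
 *	its sk_protocol, sk_bound_dev_if and sk_mark are rewritten below
 *	for each reply.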
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipcm_init(&ipc);
	ipc.addr = daddr;
	ipc.sockc.transmit_time = transmit_time;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos;

	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}