// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph_set_totlen(iph, skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

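	/* Run the packet through the NF_INET_LOCAL_OUT hook.  nf_hook() returns
	 * 1 when the packet may continue; ip_local_out() below turns that into
	 * a call to dst_output().
	 */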
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

static inline int ip_select_ttl(const struct inet_sock *inet,
				const struct dst_entry *dst)
{
	int ttl = READ_ONCE(inet->uc_ttl);

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
			  u8 tos)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	/* Do not bother generating IPID for small packets (eg SYNACK) */
	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		/* TCP packets here are SYNACK with fat IPv4/TCP options.
		 * Avoid using the hashed IP ident generator.
		 */
		if (sk->sk_protocol == IPPROTO_TCP)
			iph->id = (__force __be16)get_random_u16();
		else
			__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt);
	}

	skb->priority = READ_ONCE(sk->sk_priority);
	if (!skb->mark)
		skb->mark = READ_ONCE(sk->sk_mark);

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* OUTOCTETS should be counted after fragment */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		skb = skb_expand_head(skb, hh_len);
		if (!skb)
			return -ENOMEM;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res != LWTUNNEL_XMIT_CONTINUE)
			return res;
	}

	rcu_read_lock();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock();
		return res;
	}
	rcu_read_unlock();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
	return PTR_ERR(neigh);
}

static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *    stack.
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
	}

	return ret;
}

static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || IPCB(skb)->frag_max_size)
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

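	/* Neither GSO nor oversized: the packet fits the path MTU and can be
	 * handed to ip_finish_output2() directly.
	 */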
	return ip_finish_output2(net, sk, skb);
}

static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
		return ret;
	}
}

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		fallthrough;
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

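	/* Run the NF_INET_POST_ROUTING hook and then ip_finish_output();
	 * the hook is skipped when netfilter has already rerouted the skb
	 * (IPSKB_REROUTED).
	 */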
	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
EXPORT_SYMBOL(ip_output);

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));

	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;
}

/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = READ_ONCE(sk->sk_priority);
	skb->mark = READ_ONCE(sk->sk_mark);

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}
EXPORT_SYMBOL(ip_queue_xmit);

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

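	/* DF is set, but skb->ignore_df allows local fragmentation and any
	 * frag_max_size still fits the MTU, so use the common path anyway.
	 */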
	return ip_do_fragment(net, sk, skb, output);
}

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);

void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu, bool DF,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->DF = DF;
	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left) {
		len &= ~7;
	}

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
723065ff79fSPablo Neira Ayuso */ 724065ff79fSPablo Neira Ayuso if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len)) 725065ff79fSPablo Neira Ayuso BUG(); 726065ff79fSPablo Neira Ayuso state->left -= len; 727065ff79fSPablo Neira Ayuso 728065ff79fSPablo Neira Ayuso /* 729065ff79fSPablo Neira Ayuso * Fill in the new header fields. 730065ff79fSPablo Neira Ayuso */ 731065ff79fSPablo Neira Ayuso iph = ip_hdr(skb2); 732065ff79fSPablo Neira Ayuso iph->frag_off = htons((state->offset >> 3)); 733e7a409c3SEric Dumazet if (state->DF) 734e7a409c3SEric Dumazet iph->frag_off |= htons(IP_DF); 735065ff79fSPablo Neira Ayuso 736065ff79fSPablo Neira Ayuso /* 737065ff79fSPablo Neira Ayuso * Added AC : If we are fragmenting a fragment that's not the 738065ff79fSPablo Neira Ayuso * last fragment then keep MF on each bit 739065ff79fSPablo Neira Ayuso */ 740065ff79fSPablo Neira Ayuso if (state->left > 0 || state->not_last_frag) 741065ff79fSPablo Neira Ayuso iph->frag_off |= htons(IP_MF); 742065ff79fSPablo Neira Ayuso state->ptr += len; 743065ff79fSPablo Neira Ayuso state->offset += len; 744065ff79fSPablo Neira Ayuso 745065ff79fSPablo Neira Ayuso iph->tot_len = htons(len + state->hlen); 746065ff79fSPablo Neira Ayuso 747065ff79fSPablo Neira Ayuso ip_send_check(iph); 748065ff79fSPablo Neira Ayuso 749065ff79fSPablo Neira Ayuso return skb2; 750065ff79fSPablo Neira Ayuso } 751065ff79fSPablo Neira Ayuso EXPORT_SYMBOL(ip_frag_next); 752065ff79fSPablo Neira Ayuso 7531da177e4SLinus Torvalds /* 7541da177e4SLinus Torvalds * This IP datagram is too large to be sent in one piece. Break it up into 7551da177e4SLinus Torvalds * smaller pieces (each of size equal to IP header plus 7561da177e4SLinus Torvalds * a block of the data of the original IP data part) that will yet fit in a 7571da177e4SLinus Torvalds * single device frame, and queue such a frame for sending. 7581da177e4SLinus Torvalds */ 7591da177e4SLinus Torvalds 760694869b3SEric W. Biederman int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, 761694869b3SEric W. Biederman int (*output)(struct net *, struct sock *, struct sk_buff *)) 7621da177e4SLinus Torvalds { 7631da177e4SLinus Torvalds struct iphdr *iph; 7641da177e4SLinus Torvalds struct sk_buff *skb2; 765a1ac9c8aSMartin KaFai Lau bool mono_delivery_time = skb->mono_delivery_time; 766511c3f92SEric Dumazet struct rtable *rt = skb_rtable(skb); 767065ff79fSPablo Neira Ayuso unsigned int mtu, hlen, ll_rs; 768c8b17be0SPablo Neira Ayuso struct ip_fraglist_iter iter; 7699669fffcSEric Dumazet ktime_t tstamp = skb->tstamp; 770065ff79fSPablo Neira Ayuso struct ip_frag_state state; 7711da177e4SLinus Torvalds int err = 0; 7721da177e4SLinus Torvalds 773dbd3393cSHannes Frederic Sowa /* for offloaded checksums cleanup checksum before fragmentation */ 774dbd3393cSHannes Frederic Sowa if (skb->ip_summed == CHECKSUM_PARTIAL && 775dbd3393cSHannes Frederic Sowa (err = skb_checksum_help(skb))) 776dbd3393cSHannes Frederic Sowa goto fail; 777dbd3393cSHannes Frederic Sowa 7781da177e4SLinus Torvalds /* 7791da177e4SLinus Torvalds * Point into the IP datagram header. 
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag) {
				bool first_frag = (iter.offset == 0);

				IPCB(iter.frag)->flags = IPCB(skb)->flags;
				ip_fraglist_prepare(skb, &iter);
				if (first_frag && IPCB(skb)->opt.optlen) {
					/* ipcb->opt is not populated for frags
					 * coming from __ip_make_skb(),
					 * ip_options_fragment() needs optlen
					 */
					IPCB(iter.frag)->opt.optlen =
						IPCB(skb)->opt.optlen;
					ip_options_fragment(iter.frag);
					ip_send_check(iter.iph);
				}
			}

			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
		     &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;
	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	bool zc = false;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
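	/* When the cork carries a gso_size (e.g. UDP GSO), one oversized skb is
	 * built here and segmented later, so the append limit is lifted to
	 * IP_MAX_MTU instead of the link MTU.
	 */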
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = atomic_inc_return(&sk->sk_tskey) - 1;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;

	if ((flags & MSG_ZEROCOPY) && length) {
		struct msghdr *msg = from;

		if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
			if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
				return -EINVAL;

			/* Leave uarg NULL if can't zerocopy, callers should
			 * be able to handle it.
			 */
			if ((rt->dst.dev->features & NETIF_F_SG) &&
			    csummode == CHECKSUM_PARTIAL) {
				paged = true;
				zc = true;
				uarg = msg->msg_ubuf;
			}
		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
			uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
			if (!uarg)
				return -ENOBUFS;
			extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
			if (rt->dst.dev->features & NETIF_F_SG &&
			    csummode == CHECKSUM_PARTIAL) {
				paged = true;
				zc = true;
			} else {
				uarg_to_msgzc(uarg)->zerocopy = 0;
				skb_zcopy_set(skb, uarg, &extra_uref);
			}
		}
	} else if ((flags & MSG_SPLICE_PAGES) && length) {
		if (inet_test_bit(HDRINCL, sk))
			return -EPERM;
		if (rt->dst.dev->features & NETIF_F_SG &&
		    getfrag == ip_generic_getfrag)
			/* We need an empty buffer to attach stuff to */
			paged = true;
		else
			flags &= ~MSG_SPLICE_PAGES;
	}

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen, alloc_extra;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			alloc_extra = hh_len + 15;
			alloc_extra += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloc_extra += rt->dst.trailer_len;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else if (!paged &&
				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
				  !(rt->dst.dev->features & NETIF_F_SG)))
				alloclen = fraglen;
			else {
				alloclen = fragheaderlen + transhdrlen;
				pagedlen = datalen - transhdrlen;
			}

			alloclen += alloc_extra;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk, alloclen,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
11421da177e4SLinus Torvalds */ 114315e36f5bSWillem de Bruijn data = skb_put(skb, fraglen + exthdrlen - pagedlen); 1144c14d2450SArnaldo Carvalho de Melo skb_set_network_header(skb, exthdrlen); 1145b0e380b1SArnaldo Carvalho de Melo skb->transport_header = (skb->network_header + 1146b0e380b1SArnaldo Carvalho de Melo fragheaderlen); 1147353e5c9aSSteffen Klassert data += fragheaderlen + exthdrlen; 11481da177e4SLinus Torvalds 11491da177e4SLinus Torvalds if (fraggap) { 11501da177e4SLinus Torvalds skb->csum = skb_copy_and_csum_bits( 11511da177e4SLinus Torvalds skb_prev, maxfraglen, 11528d5930dfSAl Viro data + transhdrlen, fraggap); 11531da177e4SLinus Torvalds skb_prev->csum = csum_sub(skb_prev->csum, 11541da177e4SLinus Torvalds skb->csum); 11551da177e4SLinus Torvalds data += fraggap; 1156e9fa4f7bSHerbert Xu pskb_trim_unique(skb_prev, maxfraglen); 11571da177e4SLinus Torvalds } 11581da177e4SLinus Torvalds 115915e36f5bSWillem de Bruijn copy = datalen - transhdrlen - fraggap - pagedlen; 11600f71c9caSDavid Howells /* [!] NOTE: copy will be negative if pagedlen>0 11610f71c9caSDavid Howells * because then the equation reduces to -fraggap. 11620f71c9caSDavid Howells */ 11631da177e4SLinus Torvalds if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { 11641da177e4SLinus Torvalds err = -EFAULT; 11651da177e4SLinus Torvalds kfree_skb(skb); 11661da177e4SLinus Torvalds goto error; 11670f71c9caSDavid Howells } else if (flags & MSG_SPLICE_PAGES) { 11680f71c9caSDavid Howells copy = 0; 11691da177e4SLinus Torvalds } 11701da177e4SLinus Torvalds 11711da177e4SLinus Torvalds offset += copy; 117215e36f5bSWillem de Bruijn length -= copy + transhdrlen; 11731da177e4SLinus Torvalds transhdrlen = 0; 11741da177e4SLinus Torvalds exthdrlen = 0; 11751da177e4SLinus Torvalds csummode = CHECKSUM_NONE; 11761da177e4SLinus Torvalds 117752900d22SWillem de Bruijn /* only the initial fragment is time stamped */ 117852900d22SWillem de Bruijn skb_shinfo(skb)->tx_flags = cork->tx_flags; 117952900d22SWillem de Bruijn cork->tx_flags = 0; 118052900d22SWillem de Bruijn skb_shinfo(skb)->tskey = tskey; 118152900d22SWillem de Bruijn tskey = 0; 118252900d22SWillem de Bruijn skb_zcopy_set(skb, uarg, &extra_uref); 118352900d22SWillem de Bruijn 11840dec879fSJulian Anastasov if ((flags & MSG_CONFIRM) && !skb_prev) 11850dec879fSJulian Anastasov skb_set_dst_pending_confirm(skb, 1); 11860dec879fSJulian Anastasov 11871da177e4SLinus Torvalds /* 11881da177e4SLinus Torvalds * Put the packet on the pending queue. 
11891da177e4SLinus Torvalds */ 1190694aba69SEric Dumazet if (!skb->destructor) { 1191694aba69SEric Dumazet skb->destructor = sock_wfree; 1192694aba69SEric Dumazet skb->sk = sk; 1193694aba69SEric Dumazet wmem_alloc_delta += skb->truesize; 1194694aba69SEric Dumazet } 11951470ddf7SHerbert Xu __skb_queue_tail(queue, skb); 11961da177e4SLinus Torvalds continue; 11971da177e4SLinus Torvalds } 11981da177e4SLinus Torvalds 11991da177e4SLinus Torvalds if (copy > length) 12001da177e4SLinus Torvalds copy = length; 12011da177e4SLinus Torvalds 1202113f99c3SWillem de Bruijn if (!(rt->dst.dev->features&NETIF_F_SG) && 1203113f99c3SWillem de Bruijn skb_tailroom(skb) >= copy) { 12041da177e4SLinus Torvalds unsigned int off; 12051da177e4SLinus Torvalds 12061da177e4SLinus Torvalds off = skb->len; 12071da177e4SLinus Torvalds if (getfrag(from, skb_put(skb, copy), 12081da177e4SLinus Torvalds offset, copy, off, skb) < 0) { 12091da177e4SLinus Torvalds __skb_trim(skb, off); 12101da177e4SLinus Torvalds err = -EFAULT; 12111da177e4SLinus Torvalds goto error; 12121da177e4SLinus Torvalds } 12137da0dde6SDavid Howells } else if (flags & MSG_SPLICE_PAGES) { 12147da0dde6SDavid Howells struct msghdr *msg = from; 12157da0dde6SDavid Howells 12160f71c9caSDavid Howells err = -EIO; 12170f71c9caSDavid Howells if (WARN_ON_ONCE(copy > msg->msg_iter.count)) 12180f71c9caSDavid Howells goto error; 12190f71c9caSDavid Howells 12207da0dde6SDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy, 12217da0dde6SDavid Howells sk->sk_allocation); 12227da0dde6SDavid Howells if (err < 0) 12237da0dde6SDavid Howells goto error; 12247da0dde6SDavid Howells copy = err; 12257da0dde6SDavid Howells wmem_alloc_delta += copy; 1226c445f31bSPavel Begunkov } else if (!zc) { 12271da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags; 12281da177e4SLinus Torvalds 12291da177e4SLinus Torvalds err = -ENOMEM; 12305640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 12311da177e4SLinus Torvalds goto error; 12321da177e4SLinus Torvalds 1233c445f31bSPavel Begunkov skb_zcopy_downgrade_managed(skb); 12345640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page, 12355640f768SEric Dumazet pfrag->offset)) { 12361da177e4SLinus Torvalds err = -EMSGSIZE; 12375640f768SEric Dumazet if (i == MAX_SKB_FRAGS) 12381da177e4SLinus Torvalds goto error; 12395640f768SEric Dumazet 12405640f768SEric Dumazet __skb_fill_page_desc(skb, i, pfrag->page, 12415640f768SEric Dumazet pfrag->offset, 0); 12425640f768SEric Dumazet skb_shinfo(skb)->nr_frags = ++i; 12435640f768SEric Dumazet get_page(pfrag->page); 12441da177e4SLinus Torvalds } 12455640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset); 12465640f768SEric Dumazet if (getfrag(from, 12475640f768SEric Dumazet page_address(pfrag->page) + pfrag->offset, 12485640f768SEric Dumazet offset, copy, skb->len, skb) < 0) 12495640f768SEric Dumazet goto error_efault; 12505640f768SEric Dumazet 12515640f768SEric Dumazet pfrag->offset += copy; 12525640f768SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1253ede57d58SRichard Gobert skb_len_add(skb, copy); 1254694aba69SEric Dumazet wmem_alloc_delta += copy; 1255b5947e5dSWillem de Bruijn } else { 1256b5947e5dSWillem de Bruijn err = skb_zerocopy_iter_dgram(skb, from, copy); 1257b5947e5dSWillem de Bruijn if (err < 0) 1258b5947e5dSWillem de Bruijn goto error; 12591da177e4SLinus Torvalds } 12601da177e4SLinus Torvalds offset += copy; 12611da177e4SLinus Torvalds length -= copy; 12621da177e4SLinus Torvalds } 12631da177e4SLinus Torvalds 12649e8445a5SPaolo 
Abeni if (wmem_alloc_delta) 1265694aba69SEric Dumazet refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 12661da177e4SLinus Torvalds return 0; 12671da177e4SLinus Torvalds 12685640f768SEric Dumazet error_efault: 12695640f768SEric Dumazet err = -EFAULT; 12701da177e4SLinus Torvalds error: 12718e044917SJonathan Lemon net_zcopy_put_abort(uarg, extra_uref); 12721470ddf7SHerbert Xu cork->length -= length; 12735e38e270SPavel Emelyanov IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); 1274694aba69SEric Dumazet refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 12751da177e4SLinus Torvalds return err; 12761da177e4SLinus Torvalds } 12771da177e4SLinus Torvalds 12781470ddf7SHerbert Xu static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, 12791470ddf7SHerbert Xu struct ipcm_cookie *ipc, struct rtable **rtp) 12801470ddf7SHerbert Xu { 1281f6d8bd05SEric Dumazet struct ip_options_rcu *opt; 12821470ddf7SHerbert Xu struct rtable *rt; 12831470ddf7SHerbert Xu 12849783ccd0SGao Feng rt = *rtp; 12859783ccd0SGao Feng if (unlikely(!rt)) 12869783ccd0SGao Feng return -EFAULT; 12879783ccd0SGao Feng 12881470ddf7SHerbert Xu /* 12891470ddf7SHerbert Xu * setup for corking. 12901470ddf7SHerbert Xu */ 12911470ddf7SHerbert Xu opt = ipc->opt; 12921470ddf7SHerbert Xu if (opt) { 129351456b29SIan Morris if (!cork->opt) { 12941470ddf7SHerbert Xu cork->opt = kmalloc(sizeof(struct ip_options) + 40, 12951470ddf7SHerbert Xu sk->sk_allocation); 129651456b29SIan Morris if (unlikely(!cork->opt)) 12971470ddf7SHerbert Xu return -ENOBUFS; 12981470ddf7SHerbert Xu } 1299f6d8bd05SEric Dumazet memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen); 13001470ddf7SHerbert Xu cork->flags |= IPCORK_OPT; 13011470ddf7SHerbert Xu cork->addr = ipc->addr; 13021470ddf7SHerbert Xu } 13039783ccd0SGao Feng 1304482fc609SHannes Frederic Sowa cork->fragsize = ip_sk_use_pmtu(sk) ? 1305501a90c9SEric Dumazet dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu); 1306501a90c9SEric Dumazet 1307501a90c9SEric Dumazet if (!inetdev_valid_mtu(cork->fragsize)) 1308501a90c9SEric Dumazet return -ENETUNREACH; 1309bec1f6f6SWillem de Bruijn 1310fbf47813SWillem de Bruijn cork->gso_size = ipc->gso_size; 1311501a90c9SEric Dumazet 13121470ddf7SHerbert Xu cork->dst = &rt->dst; 1313501a90c9SEric Dumazet /* We stole this route, caller should not release it. */ 1314501a90c9SEric Dumazet *rtp = NULL; 1315501a90c9SEric Dumazet 13161470ddf7SHerbert Xu cork->length = 0; 1317aa661581SFrancesco Fusco cork->ttl = ipc->ttl; 1318aa661581SFrancesco Fusco cork->tos = ipc->tos; 1319c6af0c22SWillem de Bruijn cork->mark = ipc->sockc.mark; 1320aa661581SFrancesco Fusco cork->priority = ipc->priority; 1321bc969a97SJesus Sanchez-Palencia cork->transmit_time = ipc->sockc.transmit_time; 1322678ca42dSWillem de Bruijn cork->tx_flags = 0; 1323678ca42dSWillem de Bruijn sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags); 13241470ddf7SHerbert Xu 13251470ddf7SHerbert Xu return 0; 13261470ddf7SHerbert Xu } 13271470ddf7SHerbert Xu 13281470ddf7SHerbert Xu /* 1329c49cf266SDavid Howells * ip_append_data() can make one large IP datagram from many pieces of 1330c49cf266SDavid Howells * data. Each piece will be held on the socket until 1331c49cf266SDavid Howells * ip_push_pending_frames() is called. Each piece can be a page or 1332c49cf266SDavid Howells * non-page data. 13331470ddf7SHerbert Xu * 13341470ddf7SHerbert Xu * Not only UDP, other transport protocols - e.g. raw sockets - can use 13351470ddf7SHerbert Xu * this interface potentially. 
13361470ddf7SHerbert Xu * 13371470ddf7SHerbert Xu * LATER: length must be adjusted by pad at tail, when it is required. 13381470ddf7SHerbert Xu */ 1339f5fca608SDavid S. Miller int ip_append_data(struct sock *sk, struct flowi4 *fl4, 13401470ddf7SHerbert Xu int getfrag(void *from, char *to, int offset, int len, 13411470ddf7SHerbert Xu int odd, struct sk_buff *skb), 13421470ddf7SHerbert Xu void *from, int length, int transhdrlen, 13431470ddf7SHerbert Xu struct ipcm_cookie *ipc, struct rtable **rtp, 13441470ddf7SHerbert Xu unsigned int flags) 13451470ddf7SHerbert Xu { 13461470ddf7SHerbert Xu struct inet_sock *inet = inet_sk(sk); 13471470ddf7SHerbert Xu int err; 13481470ddf7SHerbert Xu 13491470ddf7SHerbert Xu if (flags&MSG_PROBE) 13501470ddf7SHerbert Xu return 0; 13511470ddf7SHerbert Xu 13521470ddf7SHerbert Xu if (skb_queue_empty(&sk->sk_write_queue)) { 1353bdc712b4SDavid S. Miller err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp); 13541470ddf7SHerbert Xu if (err) 13551470ddf7SHerbert Xu return err; 13561470ddf7SHerbert Xu } else { 13571470ddf7SHerbert Xu transhdrlen = 0; 13581470ddf7SHerbert Xu } 13591470ddf7SHerbert Xu 13605640f768SEric Dumazet return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, 13615640f768SEric Dumazet sk_page_frag(sk), getfrag, 13621470ddf7SHerbert Xu from, length, transhdrlen, flags); 13631470ddf7SHerbert Xu } 13641470ddf7SHerbert Xu 13651470ddf7SHerbert Xu static void ip_cork_release(struct inet_cork *cork) 1366429f08e9SPavel Emelyanov { 13671470ddf7SHerbert Xu cork->flags &= ~IPCORK_OPT; 13681470ddf7SHerbert Xu kfree(cork->opt); 13691470ddf7SHerbert Xu cork->opt = NULL; 13701470ddf7SHerbert Xu dst_release(cork->dst); 13711470ddf7SHerbert Xu cork->dst = NULL; 1372429f08e9SPavel Emelyanov } 1373429f08e9SPavel Emelyanov 13741da177e4SLinus Torvalds /* 13751da177e4SLinus Torvalds * Combined all pending IP fragments on the socket as one IP datagram 13761da177e4SLinus Torvalds * and push them out. 13771da177e4SLinus Torvalds */ 13781c32c5adSHerbert Xu struct sk_buff *__ip_make_skb(struct sock *sk, 137977968b78SDavid S. 
Miller struct flowi4 *fl4, 13801470ddf7SHerbert Xu struct sk_buff_head *queue, 13811470ddf7SHerbert Xu struct inet_cork *cork) 13821da177e4SLinus Torvalds { 13831da177e4SLinus Torvalds struct sk_buff *skb, *tmp_skb; 13841da177e4SLinus Torvalds struct sk_buff **tail_skb; 13851da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 13860388b004SPavel Emelyanov struct net *net = sock_net(sk); 13871da177e4SLinus Torvalds struct ip_options *opt = NULL; 13881470ddf7SHerbert Xu struct rtable *rt = (struct rtable *)cork->dst; 13891da177e4SLinus Torvalds struct iphdr *iph; 139076ab608dSAlexey Dobriyan __be16 df = 0; 13911da177e4SLinus Torvalds __u8 ttl; 13921da177e4SLinus Torvalds 139351456b29SIan Morris skb = __skb_dequeue(queue); 139451456b29SIan Morris if (!skb) 13951da177e4SLinus Torvalds goto out; 13961da177e4SLinus Torvalds tail_skb = &(skb_shinfo(skb)->frag_list); 13971da177e4SLinus Torvalds 13981da177e4SLinus Torvalds /* move skb->data to ip header from ext header */ 1399d56f90a7SArnaldo Carvalho de Melo if (skb->data < skb_network_header(skb)) 1400bbe735e4SArnaldo Carvalho de Melo __skb_pull(skb, skb_network_offset(skb)); 14011470ddf7SHerbert Xu while ((tmp_skb = __skb_dequeue(queue)) != NULL) { 1402cfe1fc77SArnaldo Carvalho de Melo __skb_pull(tmp_skb, skb_network_header_len(skb)); 14031da177e4SLinus Torvalds *tail_skb = tmp_skb; 14041da177e4SLinus Torvalds tail_skb = &(tmp_skb->next); 14051da177e4SLinus Torvalds skb->len += tmp_skb->len; 14061da177e4SLinus Torvalds skb->data_len += tmp_skb->len; 14071da177e4SLinus Torvalds skb->truesize += tmp_skb->truesize; 14081da177e4SLinus Torvalds tmp_skb->destructor = NULL; 14091da177e4SLinus Torvalds tmp_skb->sk = NULL; 14101da177e4SLinus Torvalds } 14111da177e4SLinus Torvalds 14121da177e4SLinus Torvalds /* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow 14131da177e4SLinus Torvalds * to fragment the frame generated here. No matter, what transforms 14141da177e4SLinus Torvalds * how transforms change size of the packet, it will come out. 14151da177e4SLinus Torvalds */ 141660ff7467SWANG Cong skb->ignore_df = ip_sk_ignore_df(sk); 14171da177e4SLinus Torvalds 14181da177e4SLinus Torvalds /* DF bit is set when we want to see DF on outgoing frames. 141960ff7467SWANG Cong * If ignore_df is set too, we still allow to fragment this frame 14201da177e4SLinus Torvalds * locally. */ 1421482fc609SHannes Frederic Sowa if (inet->pmtudisc == IP_PMTUDISC_DO || 1422482fc609SHannes Frederic Sowa inet->pmtudisc == IP_PMTUDISC_PROBE || 1423d8d1f30bSChangli Gao (skb->len <= dst_mtu(&rt->dst) && 1424d8d1f30bSChangli Gao ip_dont_fragment(sk, &rt->dst))) 14251da177e4SLinus Torvalds df = htons(IP_DF); 14261da177e4SLinus Torvalds 14271470ddf7SHerbert Xu if (cork->flags & IPCORK_OPT) 14281470ddf7SHerbert Xu opt = cork->opt; 14291da177e4SLinus Torvalds 1430aa661581SFrancesco Fusco if (cork->ttl != 0) 1431aa661581SFrancesco Fusco ttl = cork->ttl; 1432aa661581SFrancesco Fusco else if (rt->rt_type == RTN_MULTICAST) 14331da177e4SLinus Torvalds ttl = inet->mc_ttl; 14341da177e4SLinus Torvalds else 1435d8d1f30bSChangli Gao ttl = ip_select_ttl(inet, &rt->dst); 14361da177e4SLinus Torvalds 1437749154aaSAnsis Atteka iph = ip_hdr(skb); 14381da177e4SLinus Torvalds iph->version = 4; 14391da177e4SLinus Torvalds iph->ihl = 5; 1440aa661581SFrancesco Fusco iph->tos = (cork->tos != -1) ? 
cork->tos : inet->tos; 14411da177e4SLinus Torvalds iph->frag_off = df; 14421da177e4SLinus Torvalds iph->ttl = ttl; 14431da177e4SLinus Torvalds iph->protocol = sk->sk_protocol; 144484f9307cSEric Dumazet ip_copy_addrs(iph, fl4); 1445b6a7719aSHannes Frederic Sowa ip_select_ident(net, skb, sk); 14461da177e4SLinus Torvalds 144722f728f8SDavid S. Miller if (opt) { 144822f728f8SDavid S. Miller iph->ihl += opt->optlen >> 2; 14494f0e3040SJakub Kicinski ip_options_build(skb, opt, cork->addr, rt); 145022f728f8SDavid S. Miller } 145122f728f8SDavid S. Miller 1452aa661581SFrancesco Fusco skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority; 1453c6af0c22SWillem de Bruijn skb->mark = cork->mark; 1454bc969a97SJesus Sanchez-Palencia skb->tstamp = cork->transmit_time; 1455a21bba94SEric Dumazet /* 1456a21bba94SEric Dumazet * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec 1457a21bba94SEric Dumazet * on dst refcount 1458a21bba94SEric Dumazet */ 14591470ddf7SHerbert Xu cork->dst = NULL; 1460d8d1f30bSChangli Gao skb_dst_set(skb, &rt->dst); 14611da177e4SLinus Torvalds 146299e5acaeSZiyang Xuan if (iph->protocol == IPPROTO_ICMP) { 146399e5acaeSZiyang Xuan u8 icmp_type; 146499e5acaeSZiyang Xuan 146599e5acaeSZiyang Xuan /* For such sockets, transhdrlen is zero when do ip_append_data(), 146699e5acaeSZiyang Xuan * so icmphdr does not in skb linear region and can not get icmp_type 146799e5acaeSZiyang Xuan * by icmp_hdr(skb)->type. 146899e5acaeSZiyang Xuan */ 1469cafbe182SEric Dumazet if (sk->sk_type == SOCK_RAW && 1470cafbe182SEric Dumazet !inet_test_bit(HDRINCL, sk)) 147199e5acaeSZiyang Xuan icmp_type = fl4->fl4_icmp_type; 147299e5acaeSZiyang Xuan else 147399e5acaeSZiyang Xuan icmp_type = icmp_hdr(skb)->type; 147499e5acaeSZiyang Xuan icmp_out_count(net, icmp_type); 147599e5acaeSZiyang Xuan } 147696793b48SDavid L Stevens 14771c32c5adSHerbert Xu ip_cork_release(cork); 14781c32c5adSHerbert Xu out: 14791c32c5adSHerbert Xu return skb; 14801c32c5adSHerbert Xu } 14811c32c5adSHerbert Xu 1482b5ec8eeaSEric Dumazet int ip_send_skb(struct net *net, struct sk_buff *skb) 14831c32c5adSHerbert Xu { 14841c32c5adSHerbert Xu int err; 14851c32c5adSHerbert Xu 148633224b16SEric W. Biederman err = ip_local_out(net, skb->sk, skb); 14871da177e4SLinus Torvalds if (err) { 14881da177e4SLinus Torvalds if (err > 0) 14896ce9e7b5SEric Dumazet err = net_xmit_errno(err); 14901da177e4SLinus Torvalds if (err) 14911c32c5adSHerbert Xu IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); 14921da177e4SLinus Torvalds } 14931da177e4SLinus Torvalds 14941da177e4SLinus Torvalds return err; 14951da177e4SLinus Torvalds } 14961da177e4SLinus Torvalds 149777968b78SDavid S. Miller int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4) 14981470ddf7SHerbert Xu { 14991c32c5adSHerbert Xu struct sk_buff *skb; 15001c32c5adSHerbert Xu 150177968b78SDavid S. Miller skb = ip_finish_skb(sk, fl4); 15021c32c5adSHerbert Xu if (!skb) 15031c32c5adSHerbert Xu return 0; 15041c32c5adSHerbert Xu 15051c32c5adSHerbert Xu /* Netfilter gets whole the not fragmented skb. */ 1506b5ec8eeaSEric Dumazet return ip_send_skb(sock_net(sk), skb); 15071470ddf7SHerbert Xu } 15081470ddf7SHerbert Xu 15091da177e4SLinus Torvalds /* 15101da177e4SLinus Torvalds * Throw away all pending data on the socket. 
15111da177e4SLinus Torvalds  */
15121470ddf7SHerbert Xu static void __ip_flush_pending_frames(struct sock *sk,
15131470ddf7SHerbert Xu 				      struct sk_buff_head *queue,
15141470ddf7SHerbert Xu 				      struct inet_cork *cork)
15151da177e4SLinus Torvalds {
15161da177e4SLinus Torvalds 	struct sk_buff *skb;
15171da177e4SLinus Torvalds 
15181470ddf7SHerbert Xu 	while ((skb = __skb_dequeue_tail(queue)) != NULL)
15191da177e4SLinus Torvalds 		kfree_skb(skb);
15201da177e4SLinus Torvalds 
15211470ddf7SHerbert Xu 	ip_cork_release(cork);
15221470ddf7SHerbert Xu }
15231470ddf7SHerbert Xu 
15241470ddf7SHerbert Xu void ip_flush_pending_frames(struct sock *sk)
15251470ddf7SHerbert Xu {
1526bdc712b4SDavid S. Miller 	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
15271da177e4SLinus Torvalds }
15281da177e4SLinus Torvalds 
15291c32c5adSHerbert Xu struct sk_buff *ip_make_skb(struct sock *sk,
153077968b78SDavid S. Miller 			    struct flowi4 *fl4,
15311c32c5adSHerbert Xu 			    int getfrag(void *from, char *to, int offset,
15321c32c5adSHerbert Xu 					int len, int odd, struct sk_buff *skb),
15331c32c5adSHerbert Xu 			    void *from, int length, int transhdrlen,
15341c32c5adSHerbert Xu 			    struct ipcm_cookie *ipc, struct rtable **rtp,
15351cd7884dSWillem de Bruijn 			    struct inet_cork *cork, unsigned int flags)
15361c32c5adSHerbert Xu {
15371c32c5adSHerbert Xu 	struct sk_buff_head queue;
15381c32c5adSHerbert Xu 	int err;
15391c32c5adSHerbert Xu 
15401c32c5adSHerbert Xu 	if (flags & MSG_PROBE)
15411c32c5adSHerbert Xu 		return NULL;
15421c32c5adSHerbert Xu 
15431c32c5adSHerbert Xu 	__skb_queue_head_init(&queue);
15441c32c5adSHerbert Xu 
15451cd7884dSWillem de Bruijn 	cork->flags = 0;
15461cd7884dSWillem de Bruijn 	cork->addr = 0;
15471cd7884dSWillem de Bruijn 	cork->opt = NULL;
15481cd7884dSWillem de Bruijn 	err = ip_setup_cork(sk, cork, ipc, rtp);
15491c32c5adSHerbert Xu 	if (err)
15501c32c5adSHerbert Xu 		return ERR_PTR(err);
15511c32c5adSHerbert Xu 
15521cd7884dSWillem de Bruijn 	err = __ip_append_data(sk, fl4, &queue, cork,
15535640f768SEric Dumazet 			       &current->task_frag, getfrag,
15541c32c5adSHerbert Xu 			       from, length, transhdrlen, flags);
15551c32c5adSHerbert Xu 	if (err) {
15561cd7884dSWillem de Bruijn 		__ip_flush_pending_frames(sk, &queue, cork);
15571c32c5adSHerbert Xu 		return ERR_PTR(err);
15581c32c5adSHerbert Xu 	}
15591c32c5adSHerbert Xu 
15601cd7884dSWillem de Bruijn 	return __ip_make_skb(sk, fl4, &queue, cork);
15611c32c5adSHerbert Xu }
15621da177e4SLinus Torvalds 
15631da177e4SLinus Torvalds /*
15641da177e4SLinus Torvalds  *	Fetch data from kernel space and fill in checksum if needed.
15651da177e4SLinus Torvalds  */
15661da177e4SLinus Torvalds static int ip_reply_glue_bits(void *dptr, char *to, int offset,
15671da177e4SLinus Torvalds 			      int len, int odd, struct sk_buff *skb)
15681da177e4SLinus Torvalds {
15695084205fSAl Viro 	__wsum csum;
15701da177e4SLinus Torvalds 
1571cc44c17bSAl Viro 	csum = csum_partial_copy_nocheck(dptr+offset, to, len);
15721da177e4SLinus Torvalds 	skb->csum = csum_block_add(skb->csum, csum, odd);
15731da177e4SLinus Torvalds 	return 0;
15741da177e4SLinus Torvalds }
15751da177e4SLinus Torvalds 
15761da177e4SLinus Torvalds /*
15771da177e4SLinus Torvalds  *	Generic function to send a packet as reply to another packet.
1578be9f4a44SEric Dumazet  *	Used to send some TCP resets/acks so far.
15791da177e4SLinus Torvalds */ 1580bdbbb852SEric Dumazet void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, 158124a2d43dSEric Dumazet const struct ip_options *sopt, 158224a2d43dSEric Dumazet __be32 daddr, __be32 saddr, 158324a2d43dSEric Dumazet const struct ip_reply_arg *arg, 1584c0a8966eSAntoine Tenart unsigned int len, u64 transmit_time, u32 txhash) 15851da177e4SLinus Torvalds { 1586f6d8bd05SEric Dumazet struct ip_options_data replyopts; 15871da177e4SLinus Torvalds struct ipcm_cookie ipc; 158877968b78SDavid S. Miller struct flowi4 fl4; 1589511c3f92SEric Dumazet struct rtable *rt = skb_rtable(skb); 1590bdbbb852SEric Dumazet struct net *net = sock_net(sk); 1591be9f4a44SEric Dumazet struct sk_buff *nskb; 15924062090eSVasily Averin int err; 1593f7ba868bSDavid Ahern int oif; 15941da177e4SLinus Torvalds 159591ed1e66SPaolo Abeni if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt)) 15961da177e4SLinus Torvalds return; 15971da177e4SLinus Torvalds 159835178206SWillem de Bruijn ipcm_init(&ipc); 15990a5ebb80SDavid S. Miller ipc.addr = daddr; 1600d6fb396cSEric Dumazet ipc.sockc.transmit_time = transmit_time; 16011da177e4SLinus Torvalds 1602f6d8bd05SEric Dumazet if (replyopts.opt.opt.optlen) { 16031da177e4SLinus Torvalds ipc.opt = &replyopts.opt; 16041da177e4SLinus Torvalds 1605f6d8bd05SEric Dumazet if (replyopts.opt.opt.srr) 1606f6d8bd05SEric Dumazet daddr = replyopts.opt.opt.faddr; 16071da177e4SLinus Torvalds } 16081da177e4SLinus Torvalds 1609f7ba868bSDavid Ahern oif = arg->bound_dev_if; 16109b6c14d5SDavid Ahern if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) 16119b6c14d5SDavid Ahern oif = skb->skb_iif; 1612f7ba868bSDavid Ahern 1613f7ba868bSDavid Ahern flowi4_init_output(&fl4, oif, 161400483690SJon Maxwell IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark, 161566b13d99SEric Dumazet RT_TOS(arg->tos), 1616be9f4a44SEric Dumazet RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol, 1617538de0e0SDavid S. Miller ip_reply_arg_flowi_flags(arg), 161870e73416SDavid S. Miller daddr, saddr, 1619e2d118a1SLorenzo Colitti tcp_hdr(skb)->source, tcp_hdr(skb)->dest, 1620e2d118a1SLorenzo Colitti arg->uid); 16213df98d79SPaul Moore security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 1622e22aa148Ssewookseo rt = ip_route_output_flow(net, &fl4, sk); 1623b23dd4feSDavid S. 
Miller if (IS_ERR(rt)) 16241da177e4SLinus Torvalds return; 16251da177e4SLinus Torvalds 1626ba9e04a7SWei Wang inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; 16271da177e4SLinus Torvalds 1628eddc9ec5SArnaldo Carvalho de Melo sk->sk_protocol = ip_hdr(skb)->protocol; 1629f0e48dbfSPatrick McHardy sk->sk_bound_dev_if = arg->bound_dev_if; 16301227c177SKuniyuki Iwashima sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); 16310da7536fSWillem de Bruijn ipc.sockc.mark = fl4.flowi4_mark; 16324062090eSVasily Averin err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 16334062090eSVasily Averin len, 0, &ipc, &rt, MSG_DONTWAIT); 16344062090eSVasily Averin if (unlikely(err)) { 16354062090eSVasily Averin ip_flush_pending_frames(sk); 16364062090eSVasily Averin goto out; 16374062090eSVasily Averin } 16384062090eSVasily Averin 1639be9f4a44SEric Dumazet nskb = skb_peek(&sk->sk_write_queue); 1640be9f4a44SEric Dumazet if (nskb) { 16411da177e4SLinus Torvalds if (arg->csumoffset >= 0) 1642be9f4a44SEric Dumazet *((__sum16 *)skb_transport_header(nskb) + 1643be9f4a44SEric Dumazet arg->csumoffset) = csum_fold(csum_add(nskb->csum, 16449c70220bSArnaldo Carvalho de Melo arg->csum)); 1645be9f4a44SEric Dumazet nskb->ip_summed = CHECKSUM_NONE; 1646d98d58a0SMartin KaFai Lau nskb->mono_delivery_time = !!transmit_time; 1647c0a8966eSAntoine Tenart if (txhash) 1648c0a8966eSAntoine Tenart skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4); 164977968b78SDavid S. Miller ip_push_pending_frames(sk, &fl4); 16501da177e4SLinus Torvalds } 16514062090eSVasily Averin out: 16521da177e4SLinus Torvalds ip_rt_put(rt); 16531da177e4SLinus Torvalds } 16541da177e4SLinus Torvalds 16551da177e4SLinus Torvalds void __init ip_init(void) 16561da177e4SLinus Torvalds { 16571da177e4SLinus Torvalds ip_rt_init(); 16581da177e4SLinus Torvalds inet_initpeers(); 16591da177e4SLinus Torvalds 166072c1d3bdSWANG Cong #if defined(CONFIG_IP_MULTICAST) 166172c1d3bdSWANG Cong igmp_mc_init(); 16621da177e4SLinus Torvalds #endif 16631da177e4SLinus Torvalds } 1664
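/*
 * Editorial addition, not part of the kernel tree: a minimal sketch of how a
 * datagram protocol typically drives the corked-output API implemented above.
 * It assumes the caller has already resolved a route (struct rtable), built a
 * struct flowi4 and filled in a struct ipcm_cookie, the way udp_sendmsg()
 * does.  The function name example_corked_send() and that surrounding setup
 * are hypothetical; lock_sock(), ip_append_data(), ip_push_pending_frames(),
 * ip_flush_pending_frames() and ip_generic_getfrag() are the interfaces
 * exported by this file and <net/ip.h>.
 */
static int __maybe_unused example_corked_send(struct sock *sk,
					      struct flowi4 *fl4,
					      struct rtable **rtp,
					      struct ipcm_cookie *ipc,
					      struct msghdr *msg, size_t len)
{
	int err;

	lock_sock(sk);

	/* Queue the payload on sk->sk_write_queue as fragment-sized skbs;
	 * ip_generic_getfrag() copies the bytes out of msg->msg_iter.
	 */
	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len, 0,
			     ipc, rtp, msg->msg_flags);
	if (err)
		/* Drop whatever was queued before the failure. */
		ip_flush_pending_frames(sk);
	else if (!(msg->msg_flags & MSG_MORE))
		/* No more data expected: add IP header(s) and transmit. */
		err = ip_push_pending_frames(sk, fl4);

	release_sock(sk);
	return err;
}
/*
 * With MSG_MORE (or a protocol-level cork) the queue is left pending and a
 * later append, or a push on uncork, finishes the datagram; ip_make_skb()
 * above is the single-shot variant of the same idea that builds the skb on a
 * private queue without touching sk->sk_write_queue.
 */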