1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * INET An implementation of the TCP/IP protocol suite for the LINUX
41da177e4SLinus Torvalds * operating system. INET is implemented using the BSD Socket
51da177e4SLinus Torvalds * interface as the means of communication with the user level.
61da177e4SLinus Torvalds *
71da177e4SLinus Torvalds * The Internet Protocol (IP) output module.
81da177e4SLinus Torvalds *
902c30a84SJesper Juhl * Authors: Ross Biro
101da177e4SLinus Torvalds * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
111da177e4SLinus Torvalds * Donald Becker, <becker@super.org>
121da177e4SLinus Torvalds * Alan Cox, <Alan.Cox@linux.org>
131da177e4SLinus Torvalds * Richard Underwood
141da177e4SLinus Torvalds * Stefan Becker, <stefanb@yello.ping.de>
151da177e4SLinus Torvalds * Jorge Cwik, <jorge@laser.satlink.net>
161da177e4SLinus Torvalds * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
171da177e4SLinus Torvalds * Hirokazu Takahashi, <taka@valinux.co.jp>
181da177e4SLinus Torvalds *
191da177e4SLinus Torvalds * See ip_input.c for original log
201da177e4SLinus Torvalds *
211da177e4SLinus Torvalds * Fixes:
221da177e4SLinus Torvalds * Alan Cox : Missing nonblock feature in ip_build_xmit.
231da177e4SLinus Torvalds * Mike Kilburn : htons() missing in ip_build_xmit.
241da177e4SLinus Torvalds * Bradford Johnson: Fix faulty handling of some frames when
251da177e4SLinus Torvalds * no route is found.
261da177e4SLinus Torvalds * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
271da177e4SLinus Torvalds * (in case if packet not accepted by
281da177e4SLinus Torvalds * output firewall rules)
291da177e4SLinus Torvalds * Mike McLagan : Routing by source
301da177e4SLinus Torvalds * Alexey Kuznetsov: use new route cache
311da177e4SLinus Torvalds * Andi Kleen: Fix broken PMTU recovery and remove
321da177e4SLinus Torvalds * some redundant tests.
331da177e4SLinus Torvalds * Vitaly E. Lavrov : Transparent proxy revived after year coma.
341da177e4SLinus Torvalds * Andi Kleen : Replace ip_reply with ip_send_reply.
351da177e4SLinus Torvalds * Andi Kleen : Split fast and slow ip_build_xmit path
361da177e4SLinus Torvalds * for decreased register pressure on x86
37a66e04ceSBhaskar Chowdhury * and more readability.
381da177e4SLinus Torvalds * Marc Boucher : When call_out_firewall returns FW_QUEUE,
391da177e4SLinus Torvalds * silently drop skb instead of failing with -EPERM.
401da177e4SLinus Torvalds * Detlev Wengorz : Copy protocol for fragments.
411da177e4SLinus Torvalds * Hirokazu Takahashi: HW checksumming for outgoing UDP
421da177e4SLinus Torvalds * datagrams.
431da177e4SLinus Torvalds * Hirokazu Takahashi: sendfile() on UDP works now.
441da177e4SLinus Torvalds */
451da177e4SLinus Torvalds
467c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
471da177e4SLinus Torvalds #include <linux/module.h>
481da177e4SLinus Torvalds #include <linux/types.h>
491da177e4SLinus Torvalds #include <linux/kernel.h>
501da177e4SLinus Torvalds #include <linux/mm.h>
511da177e4SLinus Torvalds #include <linux/string.h>
521da177e4SLinus Torvalds #include <linux/errno.h>
53a1f8e7f7SAl Viro #include <linux/highmem.h>
545a0e3ad6STejun Heo #include <linux/slab.h>
551da177e4SLinus Torvalds
561da177e4SLinus Torvalds #include <linux/socket.h>
571da177e4SLinus Torvalds #include <linux/sockios.h>
581da177e4SLinus Torvalds #include <linux/in.h>
591da177e4SLinus Torvalds #include <linux/inet.h>
601da177e4SLinus Torvalds #include <linux/netdevice.h>
611da177e4SLinus Torvalds #include <linux/etherdevice.h>
621da177e4SLinus Torvalds #include <linux/proc_fs.h>
631da177e4SLinus Torvalds #include <linux/stat.h>
641da177e4SLinus Torvalds #include <linux/init.h>
651da177e4SLinus Torvalds
661da177e4SLinus Torvalds #include <net/snmp.h>
671da177e4SLinus Torvalds #include <net/ip.h>
681da177e4SLinus Torvalds #include <net/protocol.h>
691da177e4SLinus Torvalds #include <net/route.h>
70cfacb057SPatrick McHardy #include <net/xfrm.h>
711da177e4SLinus Torvalds #include <linux/skbuff.h>
721da177e4SLinus Torvalds #include <net/sock.h>
731da177e4SLinus Torvalds #include <net/arp.h>
741da177e4SLinus Torvalds #include <net/icmp.h>
751da177e4SLinus Torvalds #include <net/checksum.h>
76d457a0e3SEric Dumazet #include <net/gso.h>
771da177e4SLinus Torvalds #include <net/inetpeer.h>
78ba9e04a7SWei Wang #include <net/inet_ecn.h>
7914972cbdSRoopa Prabhu #include <net/lwtunnel.h>
8033b48679SDaniel Mack #include <linux/bpf-cgroup.h>
811da177e4SLinus Torvalds #include <linux/igmp.h>
821da177e4SLinus Torvalds #include <linux/netfilter_ipv4.h>
831da177e4SLinus Torvalds #include <linux/netfilter_bridge.h>
841da177e4SLinus Torvalds #include <linux/netlink.h>
856cbb0df7SArnaldo Carvalho de Melo #include <linux/tcp.h>
861da177e4SLinus Torvalds
87694869b3SEric W. Biederman static int
88694869b3SEric W. Biederman ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
89c5501eb3SFlorian Westphal unsigned int mtu,
90694869b3SEric W. Biederman int (*output)(struct net *, struct sock *, struct sk_buff *));
9149d16b23SAndy Zhou
921da177e4SLinus Torvalds /* Generate a checksum for an outgoing IP datagram. */
932fbd9679SDenis Efremov void ip_send_check(struct iphdr *iph)
941da177e4SLinus Torvalds {
951da177e4SLinus Torvalds iph->check = 0;
961da177e4SLinus Torvalds iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
971da177e4SLinus Torvalds }
984bc2f18bSEric Dumazet EXPORT_SYMBOL(ip_send_check);
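/* Usage sketch (illustrative): whenever a header field is rewritten after
 * the checksum was computed, ip_send_check() must be run again, as the
 * fragmentation helpers later in this file do:
 *
 *	iph->frag_off |= htons(IP_MF);
 *	ip_send_check(iph);	// zeroes iph->check, then recomputes it via ip_fast_csum()
 */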
991da177e4SLinus Torvalds
100cf91a99dSEric W. Biederman int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
101c439cb2eSHerbert Xu {
102c439cb2eSHerbert Xu struct iphdr *iph = ip_hdr(skb);
103c439cb2eSHerbert Xu
10456712f74SHeng Guo IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS);
10556712f74SHeng Guo
106b1a78b9bSXin Long iph_set_totlen(iph, skb->len);
107c439cb2eSHerbert Xu ip_send_check(iph);
108a8e3e1a9SDavid Ahern
109a8e3e1a9SDavid Ahern /* if egress device is enslaved to an L3 master device pass the
110a8e3e1a9SDavid Ahern * skb to its handler for processing
111a8e3e1a9SDavid Ahern */
112a8e3e1a9SDavid Ahern skb = l3mdev_ip_out(sk, skb);
113a8e3e1a9SDavid Ahern if (unlikely(!skb))
114a8e3e1a9SDavid Ahern return 0;
115a8e3e1a9SDavid Ahern
116f4180439SEli Cooper skb->protocol = htons(ETH_P_IP);
117f4180439SEli Cooper
11829a26a56SEric W. Biederman return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
11929a26a56SEric W. Biederman net, sk, skb, NULL, skb_dst(skb)->dev,
12013206b6bSEric W. Biederman dst_output);
1217026b1ddSDavid Miller }
1227026b1ddSDavid Miller
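/* __ip_local_out() returns 1 when the NF_INET_LOCAL_OUT hooks accept the
 * packet without stealing or queueing it; only in that case does the caller
 * go on to hand the skb to dst_output().
 */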
12333224b16SEric W. Biederman int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
124c439cb2eSHerbert Xu {
125c439cb2eSHerbert Xu int err;
126c439cb2eSHerbert Xu
127cf91a99dSEric W. Biederman err = __ip_local_out(net, sk, skb);
128c439cb2eSHerbert Xu if (likely(err == 1))
12913206b6bSEric W. Biederman err = dst_output(net, sk, skb);
130c439cb2eSHerbert Xu
131c439cb2eSHerbert Xu return err;
132c439cb2eSHerbert Xu }
133e2cb77dbSEric W. Biederman EXPORT_SYMBOL_GPL(ip_local_out);
134c439cb2eSHerbert Xu
135abc17a11SEric Dumazet static inline int ip_select_ttl(const struct inet_sock *inet,
136abc17a11SEric Dumazet const struct dst_entry *dst)
1371da177e4SLinus Torvalds {
13810f42426SEric Dumazet int ttl = READ_ONCE(inet->uc_ttl);
1391da177e4SLinus Torvalds
1401da177e4SLinus Torvalds if (ttl < 0)
141323e126fSDavid S. Miller ttl = ip4_dst_hoplimit(dst);
1421da177e4SLinus Torvalds return ttl;
1431da177e4SLinus Torvalds }
1441da177e4SLinus Torvalds
1451da177e4SLinus Torvalds /*
1461da177e4SLinus Torvalds * Add an ip header to a skbuff and send it out.
1471da177e4SLinus Torvalds *
1481da177e4SLinus Torvalds */
149cfe673b0SEric Dumazet int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
150de033b7dSWei Wang __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
151de033b7dSWei Wang u8 tos)
1521da177e4SLinus Torvalds {
153abc17a11SEric Dumazet const struct inet_sock *inet = inet_sk(sk);
154511c3f92SEric Dumazet struct rtable *rt = skb_rtable(skb);
15577589ce0SEric W. Biederman struct net *net = sock_net(sk);
1561da177e4SLinus Torvalds struct iphdr *iph;
1571da177e4SLinus Torvalds
1581da177e4SLinus Torvalds /* Build the IP header. */
159f6d8bd05SEric Dumazet skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
1608856dfa3SArnaldo Carvalho de Melo skb_reset_network_header(skb);
161eddc9ec5SArnaldo Carvalho de Melo iph = ip_hdr(skb);
1621da177e4SLinus Torvalds iph->version = 4;
1631da177e4SLinus Torvalds iph->ihl = 5;
164de033b7dSWei Wang iph->tos = tos;
165d8d1f30bSChangli Gao iph->ttl = ip_select_ttl(inet, &rt->dst);
166dd927a26SDavid S. Miller iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
167dd927a26SDavid S. Miller iph->saddr = saddr;
1681da177e4SLinus Torvalds iph->protocol = sk->sk_protocol;
169970a5a3eSEric Dumazet /* Do not bother generating IPID for small packets (eg SYNACK) */
170970a5a3eSEric Dumazet if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
171cfe673b0SEric Dumazet iph->frag_off = htons(IP_DF);
172cfe673b0SEric Dumazet iph->id = 0;
173cfe673b0SEric Dumazet } else {
174cfe673b0SEric Dumazet iph->frag_off = 0;
175970a5a3eSEric Dumazet /* TCP packets here are SYNACK with fat IPv4/TCP options.
176970a5a3eSEric Dumazet * Avoid using the hashed IP ident generator.
177970a5a3eSEric Dumazet */
178970a5a3eSEric Dumazet if (sk->sk_protocol == IPPROTO_TCP)
1797e3cf084SJason A. Donenfeld iph->id = (__force __be16)get_random_u16();
180970a5a3eSEric Dumazet else
18177589ce0SEric W. Biederman __ip_select_ident(net, iph, 1);
182cfe673b0SEric Dumazet }
1831da177e4SLinus Torvalds
184f6d8bd05SEric Dumazet if (opt && opt->opt.optlen) {
185f6d8bd05SEric Dumazet iph->ihl += opt->opt.optlen>>2;
1864f0e3040SJakub Kicinski ip_options_build(skb, &opt->opt, daddr, rt);
1871da177e4SLinus Torvalds }
1881da177e4SLinus Torvalds
1898bf43be7SEric Dumazet skb->priority = READ_ONCE(sk->sk_priority);
190e05a90ecSJamal Hadi Salim if (!skb->mark)
1913c5b4d69SEric Dumazet skb->mark = READ_ONCE(sk->sk_mark);
1921da177e4SLinus Torvalds
1931da177e4SLinus Torvalds /* Send it out. */
19433224b16SEric W. Biederman return ip_local_out(net, skb->sk, skb);
1951da177e4SLinus Torvalds }
196d8c97a94SArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
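/* Usage sketch (illustrative, names are placeholders): callers that already
 * hold a routed skb, e.g. replies built on behalf of a request socket such
 * as TCP SYNACKs, hand it straight to this helper:
 *
 *	err = ip_build_and_send_pkt(skb, sk, local_addr, remote_addr,
 *				    rcu_dereference(opt), tos);
 */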
197d8c97a94SArnaldo Carvalho de Melo
198694869b3SEric W. Biederman static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
1991da177e4SLinus Torvalds {
200adf30907SEric Dumazet struct dst_entry *dst = skb_dst(skb);
20180787ebcSMitsuru Chinen struct rtable *rt = (struct rtable *)dst;
2021da177e4SLinus Torvalds struct net_device *dev = dst->dev;
203c2636b4dSChuck Lever unsigned int hh_len = LL_RESERVED_SPACE(dev);
204f6b72b62SDavid S. Miller struct neighbour *neigh;
2055c9f7c1dSDavid Ahern bool is_v6gw = false;
2061da177e4SLinus Torvalds
207edf391ffSNeil Horman if (rt->rt_type == RTN_MULTICAST) {
2084ba1bf42SEric W. Biederman IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
209edf391ffSNeil Horman } else if (rt->rt_type == RTN_BROADCAST)
2104ba1bf42SEric W. Biederman IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
21180787ebcSMitsuru Chinen
212e4da8c78SHeng Guo /* OUTOCTETS should be counted after fragment */
213e4da8c78SHeng Guo IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
214e4da8c78SHeng Guo
2153b04dddeSStephen Hemminger if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
2165678a595SVasily Averin skb = skb_expand_head(skb, hh_len);
2175678a595SVasily Averin if (!skb)
2181da177e4SLinus Torvalds return -ENOMEM;
2191da177e4SLinus Torvalds }
2201da177e4SLinus Torvalds
22114972cbdSRoopa Prabhu if (lwtunnel_xmit_redirect(dst->lwtstate)) {
22214972cbdSRoopa Prabhu int res = lwtunnel_xmit(skb);
22314972cbdSRoopa Prabhu
224a171fbecSYan Zhai if (res != LWTUNNEL_XMIT_CONTINUE)
22514972cbdSRoopa Prabhu return res;
22614972cbdSRoopa Prabhu }
22714972cbdSRoopa Prabhu
22809eed119SEric Dumazet rcu_read_lock();
2295c9f7c1dSDavid Ahern neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
2309871f1adSVasiliy Kulikov if (!IS_ERR(neigh)) {
2314ff06203SJulian Anastasov int res;
2324ff06203SJulian Anastasov
2334ff06203SJulian Anastasov sock_confirm_neigh(skb, neigh);
2345c9f7c1dSDavid Ahern /* if crossing protocols, can not use the cached header */
2355c9f7c1dSDavid Ahern res = neigh_output(neigh, skb, is_v6gw);
23609eed119SEric Dumazet rcu_read_unlock();
237f2c31e32SEric Dumazet return res;
238f2c31e32SEric Dumazet }
23909eed119SEric Dumazet rcu_read_unlock();
24005e3aa09SDavid S. Miller
241e87cc472SJoe Perches net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
242e87cc472SJoe Perches __func__);
2435e187189SMenglong Dong kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
244c67180efSxu xin return PTR_ERR(neigh);
2451da177e4SLinus Torvalds }
2461da177e4SLinus Torvalds
247694869b3SEric W. Biederman static int ip_finish_output_gso(struct net *net, struct sock *sk,
248694869b3SEric W. Biederman struct sk_buff *skb, unsigned int mtu)
249c7ba65d7SFlorian Westphal {
25088bebdf5SJason A. Donenfeld struct sk_buff *segs, *nskb;
251c7ba65d7SFlorian Westphal netdev_features_t features;
252c7ba65d7SFlorian Westphal int ret = 0;
253c7ba65d7SFlorian Westphal
2549ee6c5dcSLance Richardson /* common case: seglen is <= mtu
255359ebda2SShmulik Ladkani */
256779b7931SDaniel Axtens if (skb_gso_validate_network_len(skb, mtu))
257694869b3SEric W. Biederman return ip_finish_output2(net, sk, skb);
258c7ba65d7SFlorian Westphal
2590ace81ecSLance Richardson /* Slowpath - GSO segment length exceeds the egress MTU.
260c7ba65d7SFlorian Westphal *
2610ace81ecSLance Richardson * This can happen in several cases:
2620ace81ecSLance Richardson * - Forwarding of a TCP GRO skb, when DF flag is not set.
2630ace81ecSLance Richardson * - Forwarding of an skb that arrived on a virtualization interface
2640ace81ecSLance Richardson * (virtio-net/vhost/tap) with TSO/GSO size set by other network
2650ace81ecSLance Richardson * stack.
2660ace81ecSLance Richardson * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
2670ace81ecSLance Richardson * interface with a smaller MTU.
2680ace81ecSLance Richardson * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
2690ace81ecSLance Richardson * bridged to a NETIF_F_TSO tunnel stacked over an interface with an
270a66e04ceSBhaskar Chowdhury * insufficient MTU.
271c7ba65d7SFlorian Westphal */
272c7ba65d7SFlorian Westphal features = netif_skb_features(skb);
273a08e7fd9SCambda Zhu BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
274c7ba65d7SFlorian Westphal segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
275330966e5SFlorian Westphal if (IS_ERR_OR_NULL(segs)) {
276c7ba65d7SFlorian Westphal kfree_skb(skb);
277c7ba65d7SFlorian Westphal return -ENOMEM;
278c7ba65d7SFlorian Westphal }
279c7ba65d7SFlorian Westphal
280c7ba65d7SFlorian Westphal consume_skb(skb);
281c7ba65d7SFlorian Westphal
28288bebdf5SJason A. Donenfeld skb_list_walk_safe(segs, segs, nskb) {
283c7ba65d7SFlorian Westphal int err;
284c7ba65d7SFlorian Westphal
285a8305bffSDavid S. Miller skb_mark_not_on_list(segs);
286694869b3SEric W. Biederman err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
287c7ba65d7SFlorian Westphal
288c7ba65d7SFlorian Westphal if (err && ret == 0)
289c7ba65d7SFlorian Westphal ret = err;
29088bebdf5SJason A. Donenfeld }
291c7ba65d7SFlorian Westphal
292c7ba65d7SFlorian Westphal return ret;
293c7ba65d7SFlorian Westphal }
294c7ba65d7SFlorian Westphal
295956fe219Sbrakmo static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2961da177e4SLinus Torvalds {
297c5501eb3SFlorian Westphal unsigned int mtu;
298c5501eb3SFlorian Westphal
2995c901daaSPatrick McHardy #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
3005c901daaSPatrick McHardy /* Policy lookup after SNAT yielded a new policy */
30100db4124SIan Morris if (skb_dst(skb)->xfrm) {
30248d5cad8SPatrick McHardy IPCB(skb)->flags |= IPSKB_REROUTED;
30313206b6bSEric W. Biederman return dst_output(net, sk, skb);
30448d5cad8SPatrick McHardy }
3055c901daaSPatrick McHardy #endif
306fedbb6b4SShmulik Ladkani mtu = ip_skb_dst_mtu(sk, skb);
307c7ba65d7SFlorian Westphal if (skb_is_gso(skb))
308694869b3SEric W. Biederman return ip_finish_output_gso(net, sk, skb, mtu);
309c7ba65d7SFlorian Westphal
310bb4cc1a1SFlorian Westphal if (skb->len > mtu || IPCB(skb)->frag_max_size)
311694869b3SEric W. Biederman return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
312c7ba65d7SFlorian Westphal
313694869b3SEric W. Biederman return ip_finish_output2(net, sk, skb);
3141da177e4SLinus Torvalds }
3151da177e4SLinus Torvalds
316956fe219Sbrakmo static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
317956fe219Sbrakmo {
318956fe219Sbrakmo int ret;
319956fe219Sbrakmo
320956fe219Sbrakmo ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
321956fe219Sbrakmo switch (ret) {
322956fe219Sbrakmo case NET_XMIT_SUCCESS:
323956fe219Sbrakmo return __ip_finish_output(net, sk, skb);
324956fe219Sbrakmo case NET_XMIT_CN:
325956fe219Sbrakmo return __ip_finish_output(net, sk, skb) ? : ret;
326956fe219Sbrakmo default:
3275e187189SMenglong Dong kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
328956fe219Sbrakmo return ret;
329956fe219Sbrakmo }
330956fe219Sbrakmo }
331956fe219Sbrakmo
33233b48679SDaniel Mack static int ip_mc_finish_output(struct net *net, struct sock *sk,
33333b48679SDaniel Mack struct sk_buff *skb)
33433b48679SDaniel Mack {
3355b18f128SStephen Suryaputra struct rtable *new_rt;
336d96ff269SDavid S. Miller bool do_cn = false;
337d96ff269SDavid S. Miller int ret, err;
33833b48679SDaniel Mack
33933b48679SDaniel Mack ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
340956fe219Sbrakmo switch (ret) {
341956fe219Sbrakmo case NET_XMIT_CN:
342d96ff269SDavid S. Miller do_cn = true;
343a8eceea8SJoe Perches fallthrough;
344d96ff269SDavid S. Miller case NET_XMIT_SUCCESS:
345d96ff269SDavid S. Miller break;
346956fe219Sbrakmo default:
3475e187189SMenglong Dong kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
34833b48679SDaniel Mack return ret;
34933b48679SDaniel Mack }
35033b48679SDaniel Mack
3515b18f128SStephen Suryaputra /* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
3525b18f128SStephen Suryaputra * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
3535b18f128SStephen Suryaputra * see ipv4_pktinfo_prepare().
3545b18f128SStephen Suryaputra */
3555b18f128SStephen Suryaputra new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
3565b18f128SStephen Suryaputra if (new_rt) {
3575b18f128SStephen Suryaputra new_rt->rt_iif = 0;
3585b18f128SStephen Suryaputra skb_dst_drop(skb);
3595b18f128SStephen Suryaputra skb_dst_set(skb, &new_rt->dst);
3605b18f128SStephen Suryaputra }
3615b18f128SStephen Suryaputra
362d96ff269SDavid S. Miller err = dev_loopback_xmit(net, sk, skb);
363d96ff269SDavid S. Miller return (do_cn && err) ? ret : err;
36433b48679SDaniel Mack }
36533b48679SDaniel Mack
366ede2059dSEric W. Biederman int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
3671da177e4SLinus Torvalds {
368511c3f92SEric Dumazet struct rtable *rt = skb_rtable(skb);
369d8d1f30bSChangli Gao struct net_device *dev = rt->dst.dev;
3701da177e4SLinus Torvalds
3711da177e4SLinus Torvalds /*
3721da177e4SLinus Torvalds * If the indicated interface is up and running, send the packet.
3731da177e4SLinus Torvalds */
3741da177e4SLinus Torvalds skb->dev = dev;
3751da177e4SLinus Torvalds skb->protocol = htons(ETH_P_IP);
3761da177e4SLinus Torvalds
3771da177e4SLinus Torvalds /*
3781da177e4SLinus Torvalds * Multicasts are looped back for other local users
3791da177e4SLinus Torvalds */
3801da177e4SLinus Torvalds
3811da177e4SLinus Torvalds if (rt->rt_flags&RTCF_MULTICAST) {
3827ad6848cSOctavian Purdila if (sk_mc_loop(sk)
3831da177e4SLinus Torvalds #ifdef CONFIG_IP_MROUTE
3841da177e4SLinus Torvalds /* Small optimization: do not loop back non-local frames
3851da177e4SLinus Torvalds that were returned after forwarding; ip_mr_input would
3861da177e4SLinus Torvalds drop them in any case.
3871da177e4SLinus Torvalds Note that local frames are looped back so they are
3881da177e4SLinus Torvalds delivered to local recipients.
3891da177e4SLinus Torvalds
3901da177e4SLinus Torvalds This check is duplicated in ip_mr_input at the moment.
3911da177e4SLinus Torvalds */
3929d4fb27dSJoe Perches &&
3939d4fb27dSJoe Perches ((rt->rt_flags & RTCF_LOCAL) ||
3949d4fb27dSJoe Perches !(IPCB(skb)->flags & IPSKB_FORWARDED))
3951da177e4SLinus Torvalds #endif
3961da177e4SLinus Torvalds ) {
3971da177e4SLinus Torvalds struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3981da177e4SLinus Torvalds if (newskb)
3999bbc768aSJan Engelhardt NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
40029a26a56SEric W. Biederman net, sk, newskb, NULL, newskb->dev,
40133b48679SDaniel Mack ip_mc_finish_output);
4021da177e4SLinus Torvalds }
4031da177e4SLinus Torvalds
4041da177e4SLinus Torvalds /* Multicasts with ttl 0 must not go beyond the host */
4051da177e4SLinus Torvalds
406eddc9ec5SArnaldo Carvalho de Melo if (ip_hdr(skb)->ttl == 0) {
4071da177e4SLinus Torvalds kfree_skb(skb);
4081da177e4SLinus Torvalds return 0;
4091da177e4SLinus Torvalds }
4101da177e4SLinus Torvalds }
4111da177e4SLinus Torvalds
4121da177e4SLinus Torvalds if (rt->rt_flags&RTCF_BROADCAST) {
4131da177e4SLinus Torvalds struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
4141da177e4SLinus Torvalds if (newskb)
41529a26a56SEric W. Biederman NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
41629a26a56SEric W. Biederman net, sk, newskb, NULL, newskb->dev,
41733b48679SDaniel Mack ip_mc_finish_output);
4181da177e4SLinus Torvalds }
4191da177e4SLinus Torvalds
42029a26a56SEric W. Biederman return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
42129a26a56SEric W. Biederman net, sk, skb, NULL, skb->dev,
42229a26a56SEric W. Biederman ip_finish_output,
42348d5cad8SPatrick McHardy !(IPCB(skb)->flags & IPSKB_REROUTED));
4241da177e4SLinus Torvalds }
4251da177e4SLinus Torvalds
426ede2059dSEric W. Biederman int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
4271da177e4SLinus Torvalds {
42828f8bfd1SPhil Sutter struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
4291bd9bef6SPatrick McHardy
4301bd9bef6SPatrick McHardy skb->dev = dev;
4311bd9bef6SPatrick McHardy skb->protocol = htons(ETH_P_IP);
4321bd9bef6SPatrick McHardy
43329a26a56SEric W. Biederman return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
43428f8bfd1SPhil Sutter net, sk, skb, indev, dev,
43548d5cad8SPatrick McHardy ip_finish_output,
43648d5cad8SPatrick McHardy !(IPCB(skb)->flags & IPSKB_REROUTED));
4371da177e4SLinus Torvalds }
4386585d7dcSBrian Vazquez EXPORT_SYMBOL(ip_output);
4391da177e4SLinus Torvalds
44084f9307cSEric Dumazet /*
44184f9307cSEric Dumazet * copy saddr and daddr, possibly using 64bit load/stores
44284f9307cSEric Dumazet * Equivalent to :
44384f9307cSEric Dumazet * iph->saddr = fl4->saddr;
44484f9307cSEric Dumazet * iph->daddr = fl4->daddr;
44584f9307cSEric Dumazet */
44684f9307cSEric Dumazet static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
44784f9307cSEric Dumazet {
44884f9307cSEric Dumazet BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
44984f9307cSEric Dumazet offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
4506321c7acSGustavo A. R. Silva
4516321c7acSGustavo A. R. Silva iph->saddr = fl4->saddr;
4526321c7acSGustavo A. R. Silva iph->daddr = fl4->daddr;
45384f9307cSEric Dumazet }
45484f9307cSEric Dumazet
455b0270e91SEric Dumazet /* Note: skb->sk can be different from sk, in case of tunnels */
45669b9e1e0SXin Long int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
45769b9e1e0SXin Long __u8 tos)
4581da177e4SLinus Torvalds {
4591da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk);
46077589ce0SEric W. Biederman struct net *net = sock_net(sk);
461f6d8bd05SEric Dumazet struct ip_options_rcu *inet_opt;
462b57ae01aSDavid S. Miller struct flowi4 *fl4;
4631da177e4SLinus Torvalds struct rtable *rt;
4641da177e4SLinus Torvalds struct iphdr *iph;
465ab6e3febSEric Dumazet int res;
4661da177e4SLinus Torvalds
4671da177e4SLinus Torvalds /* Skip all of this if the packet is already routed,
4681da177e4SLinus Torvalds * f.e. by something like SCTP.
4691da177e4SLinus Torvalds */
470ab6e3febSEric Dumazet rcu_read_lock();
471f6d8bd05SEric Dumazet inet_opt = rcu_dereference(inet->inet_opt);
472ea4fc0d6SDavid S. Miller fl4 = &fl->u.ip4;
473511c3f92SEric Dumazet rt = skb_rtable(skb);
47400db4124SIan Morris if (rt)
4751da177e4SLinus Torvalds goto packet_routed;
4761da177e4SLinus Torvalds
4771da177e4SLinus Torvalds /* Make sure we can route this packet. */
4781da177e4SLinus Torvalds rt = (struct rtable *)__sk_dst_check(sk, 0);
47951456b29SIan Morris if (!rt) {
4803ca3c68eSAl Viro __be32 daddr;
4811da177e4SLinus Torvalds
4821da177e4SLinus Torvalds /* Use correct destination address if we have options. */
483c720c7e8SEric Dumazet daddr = inet->inet_daddr;
484f6d8bd05SEric Dumazet if (inet_opt && inet_opt->opt.srr)
485f6d8bd05SEric Dumazet daddr = inet_opt->opt.faddr;
4861da177e4SLinus Torvalds
4871da177e4SLinus Torvalds /* If this fails, the transport layer's retransmit mechanism
4881da177e4SLinus Torvalds * will keep trying until a route appears or the connection
4891da177e4SLinus Torvalds * times out.
4901da177e4SLinus Torvalds */
49177589ce0SEric W. Biederman rt = ip_route_output_ports(net, fl4, sk,
49278fbfd8aSDavid S. Miller daddr, inet->inet_saddr,
49378fbfd8aSDavid S. Miller inet->inet_dport,
49478fbfd8aSDavid S. Miller inet->inet_sport,
49578fbfd8aSDavid S. Miller sk->sk_protocol,
49669b9e1e0SXin Long RT_CONN_FLAGS_TOS(sk, tos),
49778fbfd8aSDavid S. Miller sk->sk_bound_dev_if);
498b23dd4feSDavid S. Miller if (IS_ERR(rt))
4991da177e4SLinus Torvalds goto no_route;
500d8d1f30bSChangli Gao sk_setup_caps(sk, &rt->dst);
5011da177e4SLinus Torvalds }
502d8d1f30bSChangli Gao skb_dst_set_noref(skb, &rt->dst);
5031da177e4SLinus Torvalds
5041da177e4SLinus Torvalds packet_routed:
50577d5bc7eSDavid Ahern if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
5061da177e4SLinus Torvalds goto no_route;
5071da177e4SLinus Torvalds
5081da177e4SLinus Torvalds /* OK, we know where to send it, allocate and build IP header. */
509f6d8bd05SEric Dumazet skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
5108856dfa3SArnaldo Carvalho de Melo skb_reset_network_header(skb);
511eddc9ec5SArnaldo Carvalho de Melo iph = ip_hdr(skb);
51269b9e1e0SXin Long *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
51360ff7467SWANG Cong if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
5141da177e4SLinus Torvalds iph->frag_off = htons(IP_DF);
5151da177e4SLinus Torvalds else
5161da177e4SLinus Torvalds iph->frag_off = 0;
517d8d1f30bSChangli Gao iph->ttl = ip_select_ttl(inet, &rt->dst);
5181da177e4SLinus Torvalds iph->protocol = sk->sk_protocol;
51984f9307cSEric Dumazet ip_copy_addrs(iph, fl4);
52084f9307cSEric Dumazet
5211da177e4SLinus Torvalds /* Transport layer set skb->h.foo itself. */
5221da177e4SLinus Torvalds
523f6d8bd05SEric Dumazet if (inet_opt && inet_opt->opt.optlen) {
524f6d8bd05SEric Dumazet iph->ihl += inet_opt->opt.optlen >> 2;
5254f0e3040SJakub Kicinski ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
5261da177e4SLinus Torvalds }
5271da177e4SLinus Torvalds
52877589ce0SEric W. Biederman ip_select_ident_segs(net, skb, sk,
529b6a7719aSHannes Frederic Sowa skb_shinfo(skb)->gso_segs ?: 1);
5301da177e4SLinus Torvalds
531b0270e91SEric Dumazet /* TODO : should we use skb->sk here instead of sk ? */
5328bf43be7SEric Dumazet skb->priority = READ_ONCE(sk->sk_priority);
5333c5b4d69SEric Dumazet skb->mark = READ_ONCE(sk->sk_mark);
5341da177e4SLinus Torvalds
53533224b16SEric W. Biederman res = ip_local_out(net, sk, skb);
536ab6e3febSEric Dumazet rcu_read_unlock();
537ab6e3febSEric Dumazet return res;
5381da177e4SLinus Torvalds
5391da177e4SLinus Torvalds no_route:
540ab6e3febSEric Dumazet rcu_read_unlock();
54177589ce0SEric W. Biederman IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
5425e187189SMenglong Dong kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
5431da177e4SLinus Torvalds return -EHOSTUNREACH;
5441da177e4SLinus Torvalds }
54569b9e1e0SXin Long EXPORT_SYMBOL(__ip_queue_xmit);
5461da177e4SLinus Torvalds
54705e22e83SEric Dumazet int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
54805e22e83SEric Dumazet {
54905e22e83SEric Dumazet return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
55005e22e83SEric Dumazet }
55105e22e83SEric Dumazet EXPORT_SYMBOL(ip_queue_xmit);
55205e22e83SEric Dumazet
5531da177e4SLinus Torvalds static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
5541da177e4SLinus Torvalds {
5551da177e4SLinus Torvalds to->pkt_type = from->pkt_type;
5561da177e4SLinus Torvalds to->priority = from->priority;
5571da177e4SLinus Torvalds to->protocol = from->protocol;
558d2f0c961SShmulik Ladkani to->skb_iif = from->skb_iif;
559adf30907SEric Dumazet skb_dst_drop(to);
560fe76cda3SEric Dumazet skb_dst_copy(to, from);
5611da177e4SLinus Torvalds to->dev = from->dev;
56282e91ffeSThomas Graf to->mark = from->mark;
5631da177e4SLinus Torvalds
5643dd1c9a1SPaolo Abeni skb_copy_hash(to, from);
5653dd1c9a1SPaolo Abeni
5661da177e4SLinus Torvalds #ifdef CONFIG_NET_SCHED
5671da177e4SLinus Torvalds to->tc_index = from->tc_index;
5681da177e4SLinus Torvalds #endif
569e7ac05f3SYasuyuki Kozakai nf_copy(to, from);
570df5042f4SFlorian Westphal skb_ext_copy(to, from);
5716ca40d4eSJavier Martinez Canillas #if IS_ENABLED(CONFIG_IP_VS)
572c98d80edSJulian Anastasov to->ipvs_property = from->ipvs_property;
573c98d80edSJulian Anastasov #endif
574984bc16cSJames Morris skb_copy_secmark(to, from);
5751da177e4SLinus Torvalds }
5761da177e4SLinus Torvalds
577694869b3SEric W. Biederman static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
578c5501eb3SFlorian Westphal unsigned int mtu,
579694869b3SEric W. Biederman int (*output)(struct net *, struct sock *, struct sk_buff *))
58049d16b23SAndy Zhou {
58149d16b23SAndy Zhou struct iphdr *iph = ip_hdr(skb);
58249d16b23SAndy Zhou
583d6b915e2SFlorian Westphal if ((iph->frag_off & htons(IP_DF)) == 0)
584694869b3SEric W. Biederman return ip_do_fragment(net, sk, skb, output);
585d6b915e2SFlorian Westphal
586d6b915e2SFlorian Westphal if (unlikely(!skb->ignore_df ||
58749d16b23SAndy Zhou (IPCB(skb)->frag_max_size &&
58849d16b23SAndy Zhou IPCB(skb)->frag_max_size > mtu))) {
5899479b0afSEric W. Biederman IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
59049d16b23SAndy Zhou icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
59149d16b23SAndy Zhou htonl(mtu));
59249d16b23SAndy Zhou kfree_skb(skb);
59349d16b23SAndy Zhou return -EMSGSIZE;
59449d16b23SAndy Zhou }
59549d16b23SAndy Zhou
596694869b3SEric W. Biederman return ip_do_fragment(net, sk, skb, output);
59749d16b23SAndy Zhou }
59849d16b23SAndy Zhou
599c8b17be0SPablo Neira Ayuso void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
600c8b17be0SPablo Neira Ayuso unsigned int hlen, struct ip_fraglist_iter *iter)
601c8b17be0SPablo Neira Ayuso {
602c8b17be0SPablo Neira Ayuso unsigned int first_len = skb_pagelen(skb);
603c8b17be0SPablo Neira Ayuso
604b7034146SEric Dumazet iter->frag = skb_shinfo(skb)->frag_list;
605c8b17be0SPablo Neira Ayuso skb_frag_list_init(skb);
606c8b17be0SPablo Neira Ayuso
607c8b17be0SPablo Neira Ayuso iter->offset = 0;
608c8b17be0SPablo Neira Ayuso iter->iph = iph;
609c8b17be0SPablo Neira Ayuso iter->hlen = hlen;
610c8b17be0SPablo Neira Ayuso
611c8b17be0SPablo Neira Ayuso skb->data_len = first_len - skb_headlen(skb);
612c8b17be0SPablo Neira Ayuso skb->len = first_len;
613c8b17be0SPablo Neira Ayuso iph->tot_len = htons(first_len);
614c8b17be0SPablo Neira Ayuso iph->frag_off = htons(IP_MF);
615c8b17be0SPablo Neira Ayuso ip_send_check(iph);
616c8b17be0SPablo Neira Ayuso }
617c8b17be0SPablo Neira Ayuso EXPORT_SYMBOL(ip_fraglist_init);
618c8b17be0SPablo Neira Ayuso
619c8b17be0SPablo Neira Ayuso void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
620c8b17be0SPablo Neira Ayuso {
621c8b17be0SPablo Neira Ayuso unsigned int hlen = iter->hlen;
622c8b17be0SPablo Neira Ayuso struct iphdr *iph = iter->iph;
623c8b17be0SPablo Neira Ayuso struct sk_buff *frag;
624c8b17be0SPablo Neira Ayuso
625c8b17be0SPablo Neira Ayuso frag = iter->frag;
626c8b17be0SPablo Neira Ayuso frag->ip_summed = CHECKSUM_NONE;
627c8b17be0SPablo Neira Ayuso skb_reset_transport_header(frag);
628c8b17be0SPablo Neira Ayuso __skb_push(frag, hlen);
629c8b17be0SPablo Neira Ayuso skb_reset_network_header(frag);
630c8b17be0SPablo Neira Ayuso memcpy(skb_network_header(frag), iph, hlen);
631c8b17be0SPablo Neira Ayuso iter->iph = ip_hdr(frag);
632c8b17be0SPablo Neira Ayuso iph = iter->iph;
633c8b17be0SPablo Neira Ayuso iph->tot_len = htons(frag->len);
634c8b17be0SPablo Neira Ayuso ip_copy_metadata(frag, skb);
635c8b17be0SPablo Neira Ayuso iter->offset += skb->len - hlen;
636c8b17be0SPablo Neira Ayuso iph->frag_off = htons(iter->offset >> 3);
637c8b17be0SPablo Neira Ayuso if (frag->next)
638c8b17be0SPablo Neira Ayuso iph->frag_off |= htons(IP_MF);
639c8b17be0SPablo Neira Ayuso /* Ready, complete checksum */
640c8b17be0SPablo Neira Ayuso ip_send_check(iph);
641c8b17be0SPablo Neira Ayuso }
642c8b17be0SPablo Neira Ayuso EXPORT_SYMBOL(ip_fraglist_prepare);
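/* Usage sketch (illustrative): the frag_list fast path in ip_do_fragment()
 * below drives these two helpers roughly as follows:
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */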
643c8b17be0SPablo Neira Ayuso
644065ff79fSPablo Neira Ayuso void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
645e7a409c3SEric Dumazet unsigned int ll_rs, unsigned int mtu, bool DF,
646065ff79fSPablo Neira Ayuso struct ip_frag_state *state)
647065ff79fSPablo Neira Ayuso {
648065ff79fSPablo Neira Ayuso struct iphdr *iph = ip_hdr(skb);
649065ff79fSPablo Neira Ayuso
650e7a409c3SEric Dumazet state->DF = DF;
651065ff79fSPablo Neira Ayuso state->hlen = hlen;
652065ff79fSPablo Neira Ayuso state->ll_rs = ll_rs;
653065ff79fSPablo Neira Ayuso state->mtu = mtu;
654065ff79fSPablo Neira Ayuso
655065ff79fSPablo Neira Ayuso state->left = skb->len - hlen; /* Space per frame */
656065ff79fSPablo Neira Ayuso state->ptr = hlen; /* Where to start from */
657065ff79fSPablo Neira Ayuso
658065ff79fSPablo Neira Ayuso state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
659065ff79fSPablo Neira Ayuso state->not_last_frag = iph->frag_off & htons(IP_MF);
660065ff79fSPablo Neira Ayuso }
661065ff79fSPablo Neira Ayuso EXPORT_SYMBOL(ip_frag_init);
662065ff79fSPablo Neira Ayuso
66319c3401aSPablo Neira Ayuso static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
664faf482caSYajun Deng bool first_frag)
66519c3401aSPablo Neira Ayuso {
66619c3401aSPablo Neira Ayuso /* Copy the flags to each fragment. */
66719c3401aSPablo Neira Ayuso IPCB(to)->flags = IPCB(from)->flags;
66819c3401aSPablo Neira Ayuso
66919c3401aSPablo Neira Ayuso /* ANK: dirty, but effective trick. Upgrade options only if
67019c3401aSPablo Neira Ayuso * the segment to be fragmented was THE FIRST (otherwise,
67119c3401aSPablo Neira Ayuso * options are already fixed) and make it ONCE
67219c3401aSPablo Neira Ayuso * on the initial skb, so that all the following fragments
67319c3401aSPablo Neira Ayuso * will inherit fixed options.
67419c3401aSPablo Neira Ayuso */
67519c3401aSPablo Neira Ayuso if (first_frag)
67619c3401aSPablo Neira Ayuso ip_options_fragment(from);
67719c3401aSPablo Neira Ayuso }
67819c3401aSPablo Neira Ayuso
679065ff79fSPablo Neira Ayuso struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
680065ff79fSPablo Neira Ayuso {
681065ff79fSPablo Neira Ayuso unsigned int len = state->left;
682065ff79fSPablo Neira Ayuso struct sk_buff *skb2;
683065ff79fSPablo Neira Ayuso struct iphdr *iph;
684065ff79fSPablo Neira Ayuso
685065ff79fSPablo Neira Ayuso /* IF: it doesn't fit, use 'mtu' - the data space left */
686065ff79fSPablo Neira Ayuso if (len > state->mtu)
687065ff79fSPablo Neira Ayuso len = state->mtu;
688065ff79fSPablo Neira Ayuso /* IF: we are not sending up to and including the packet end
689065ff79fSPablo Neira Ayuso then align the next start on an eight byte boundary */
690065ff79fSPablo Neira Ayuso if (len < state->left) {
691065ff79fSPablo Neira Ayuso len &= ~7;
692065ff79fSPablo Neira Ayuso }
693065ff79fSPablo Neira Ayuso
694065ff79fSPablo Neira Ayuso /* Allocate buffer */
695065ff79fSPablo Neira Ayuso skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
696065ff79fSPablo Neira Ayuso if (!skb2)
697065ff79fSPablo Neira Ayuso return ERR_PTR(-ENOMEM);
698065ff79fSPablo Neira Ayuso
699065ff79fSPablo Neira Ayuso /*
700065ff79fSPablo Neira Ayuso * Set up data on packet
701065ff79fSPablo Neira Ayuso */
702065ff79fSPablo Neira Ayuso
703065ff79fSPablo Neira Ayuso ip_copy_metadata(skb2, skb);
704065ff79fSPablo Neira Ayuso skb_reserve(skb2, state->ll_rs);
705065ff79fSPablo Neira Ayuso skb_put(skb2, len + state->hlen);
706065ff79fSPablo Neira Ayuso skb_reset_network_header(skb2);
707065ff79fSPablo Neira Ayuso skb2->transport_header = skb2->network_header + state->hlen;
708065ff79fSPablo Neira Ayuso
709065ff79fSPablo Neira Ayuso /*
710065ff79fSPablo Neira Ayuso * Charge the memory for the fragment to any owner
711065ff79fSPablo Neira Ayuso * it might possess
712065ff79fSPablo Neira Ayuso */
713065ff79fSPablo Neira Ayuso
714065ff79fSPablo Neira Ayuso if (skb->sk)
715065ff79fSPablo Neira Ayuso skb_set_owner_w(skb2, skb->sk);
716065ff79fSPablo Neira Ayuso
717065ff79fSPablo Neira Ayuso /*
718065ff79fSPablo Neira Ayuso * Copy the packet header into the new buffer.
719065ff79fSPablo Neira Ayuso */
720065ff79fSPablo Neira Ayuso
721065ff79fSPablo Neira Ayuso skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
722065ff79fSPablo Neira Ayuso
723065ff79fSPablo Neira Ayuso /*
724065ff79fSPablo Neira Ayuso * Copy a block of the IP datagram.
725065ff79fSPablo Neira Ayuso */
726065ff79fSPablo Neira Ayuso if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
727065ff79fSPablo Neira Ayuso BUG();
728065ff79fSPablo Neira Ayuso state->left -= len;
729065ff79fSPablo Neira Ayuso
730065ff79fSPablo Neira Ayuso /*
731065ff79fSPablo Neira Ayuso * Fill in the new header fields.
732065ff79fSPablo Neira Ayuso */
733065ff79fSPablo Neira Ayuso iph = ip_hdr(skb2);
734065ff79fSPablo Neira Ayuso iph->frag_off = htons((state->offset >> 3));
735e7a409c3SEric Dumazet if (state->DF)
736e7a409c3SEric Dumazet iph->frag_off |= htons(IP_DF);
737065ff79fSPablo Neira Ayuso
738065ff79fSPablo Neira Ayuso /*
739065ff79fSPablo Neira Ayuso * Added AC : If we are fragmenting a fragment that's not the
740065ff79fSPablo Neira Ayuso * last fragment then keep the MF bit set on each fragment
741065ff79fSPablo Neira Ayuso */
742065ff79fSPablo Neira Ayuso if (state->left > 0 || state->not_last_frag)
743065ff79fSPablo Neira Ayuso iph->frag_off |= htons(IP_MF);
744065ff79fSPablo Neira Ayuso state->ptr += len;
745065ff79fSPablo Neira Ayuso state->offset += len;
746065ff79fSPablo Neira Ayuso
747065ff79fSPablo Neira Ayuso iph->tot_len = htons(len + state->hlen);
748065ff79fSPablo Neira Ayuso
749065ff79fSPablo Neira Ayuso ip_send_check(iph);
750065ff79fSPablo Neira Ayuso
751065ff79fSPablo Neira Ayuso return skb2;
752065ff79fSPablo Neira Ayuso }
753065ff79fSPablo Neira Ayuso EXPORT_SYMBOL(ip_frag_next);
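/* Usage sketch (illustrative): the slow path in ip_do_fragment() below is
 * built on ip_frag_init()/ip_frag_next() roughly as follows:
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);
 *	while (state.left > 0) {
 *		skb2 = ip_frag_next(skb, &state);
 *		if (IS_ERR(skb2))
 *			return PTR_ERR(skb2);
 *		err = output(net, sk, skb2);
 *		if (err)
 *			break;
 *	}
 */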
754065ff79fSPablo Neira Ayuso
7551da177e4SLinus Torvalds /*
7561da177e4SLinus Torvalds * This IP datagram is too large to be sent in one piece. Break it up into
7571da177e4SLinus Torvalds * smaller pieces (each of size equal to IP header plus
7581da177e4SLinus Torvalds * a block of the data of the original IP data part) that will yet fit in a
7591da177e4SLinus Torvalds * single device frame, and queue such a frame for sending.
7601da177e4SLinus Torvalds */
7611da177e4SLinus Torvalds
762694869b3SEric W. Biederman int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
763694869b3SEric W. Biederman int (*output)(struct net *, struct sock *, struct sk_buff *))
7641da177e4SLinus Torvalds {
7651da177e4SLinus Torvalds struct iphdr *iph;
7661da177e4SLinus Torvalds struct sk_buff *skb2;
767a1ac9c8aSMartin KaFai Lau bool mono_delivery_time = skb->mono_delivery_time;
768511c3f92SEric Dumazet struct rtable *rt = skb_rtable(skb);
769065ff79fSPablo Neira Ayuso unsigned int mtu, hlen, ll_rs;
770c8b17be0SPablo Neira Ayuso struct ip_fraglist_iter iter;
7719669fffcSEric Dumazet ktime_t tstamp = skb->tstamp;
772065ff79fSPablo Neira Ayuso struct ip_frag_state state;
7731da177e4SLinus Torvalds int err = 0;
7741da177e4SLinus Torvalds
775dbd3393cSHannes Frederic Sowa /* for offloaded checksums cleanup checksum before fragmentation */
776dbd3393cSHannes Frederic Sowa if (skb->ip_summed == CHECKSUM_PARTIAL &&
777dbd3393cSHannes Frederic Sowa (err = skb_checksum_help(skb)))
778dbd3393cSHannes Frederic Sowa goto fail;
779dbd3393cSHannes Frederic Sowa
7801da177e4SLinus Torvalds /*
7811da177e4SLinus Torvalds * Point into the IP datagram header.
7821da177e4SLinus Torvalds */
7831da177e4SLinus Torvalds
784eddc9ec5SArnaldo Carvalho de Melo iph = ip_hdr(skb);
7851da177e4SLinus Torvalds
786fedbb6b4SShmulik Ladkani mtu = ip_skb_dst_mtu(sk, skb);
787d6b915e2SFlorian Westphal if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
788d6b915e2SFlorian Westphal mtu = IPCB(skb)->frag_max_size;
7891da177e4SLinus Torvalds
7901da177e4SLinus Torvalds /*
7911da177e4SLinus Torvalds * Setup starting values.
7921da177e4SLinus Torvalds */
7931da177e4SLinus Torvalds
7941da177e4SLinus Torvalds hlen = iph->ihl * 4;
795f87c10a8SHannes Frederic Sowa mtu = mtu - hlen; /* Size of data space */
79689cee8b1SHerbert Xu IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
797254d900bSVasily Averin ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
7981da177e4SLinus Torvalds
7991da177e4SLinus Torvalds /* When frag_list is given, use it. First, check its validity:
8001da177e4SLinus Torvalds * some transformers may create a wrong frag_list or break an existing
8011da177e4SLinus Torvalds * one; that is not prohibited. In such cases fall back to copying.
8021da177e4SLinus Torvalds *
8031da177e4SLinus Torvalds * LATER: this step can be merged to real generation of fragments,
8041da177e4SLinus Torvalds * we can switch to copy when see the first bad fragment.
8051da177e4SLinus Torvalds */
80621dc3301SDavid S. Miller if (skb_has_frag_list(skb)) {
8073d13008eSEric Dumazet struct sk_buff *frag, *frag2;
808c72d8cdaSAlexey Dobriyan unsigned int first_len = skb_pagelen(skb);
8091da177e4SLinus Torvalds
8101da177e4SLinus Torvalds if (first_len - hlen > mtu ||
8111da177e4SLinus Torvalds ((first_len - hlen) & 7) ||
81256f8a75cSPaul Gortmaker ip_is_fragment(iph) ||
813254d900bSVasily Averin skb_cloned(skb) ||
814254d900bSVasily Averin skb_headroom(skb) < ll_rs)
8151da177e4SLinus Torvalds goto slow_path;
8161da177e4SLinus Torvalds
817d7fcf1a5SDavid S. Miller skb_walk_frags(skb, frag) {
8181da177e4SLinus Torvalds /* Correct geometry. */
8191da177e4SLinus Torvalds if (frag->len > mtu ||
8201da177e4SLinus Torvalds ((frag->len & 7) && frag->next) ||
821254d900bSVasily Averin skb_headroom(frag) < hlen + ll_rs)
8223d13008eSEric Dumazet goto slow_path_clean;
8231da177e4SLinus Torvalds
8241da177e4SLinus Torvalds /* Partially cloned skb? */
8251da177e4SLinus Torvalds if (skb_shared(frag))
8263d13008eSEric Dumazet goto slow_path_clean;
8272fdba6b0SHerbert Xu
8282fdba6b0SHerbert Xu BUG_ON(frag->sk);
8292fdba6b0SHerbert Xu if (skb->sk) {
8302fdba6b0SHerbert Xu frag->sk = skb->sk;
8312fdba6b0SHerbert Xu frag->destructor = sock_wfree;
8322fdba6b0SHerbert Xu }
8333d13008eSEric Dumazet skb->truesize -= frag->truesize;
8341da177e4SLinus Torvalds }
8351da177e4SLinus Torvalds
8361da177e4SLinus Torvalds /* Everything is OK. Generate! */
837c8b17be0SPablo Neira Ayuso ip_fraglist_init(skb, iph, hlen, &iter);
8381b9fbe81SYajun Deng
8391da177e4SLinus Torvalds for (;;) {
8401da177e4SLinus Torvalds /* Prepare header of the next frame,
8411da177e4SLinus Torvalds * before previous one went down. */
84219c3401aSPablo Neira Ayuso if (iter.frag) {
84327a8caa5SJakub Kicinski bool first_frag = (iter.offset == 0);
84427a8caa5SJakub Kicinski
845faf482caSYajun Deng IPCB(iter.frag)->flags = IPCB(skb)->flags;
846c8b17be0SPablo Neira Ayuso ip_fraglist_prepare(skb, &iter);
84727a8caa5SJakub Kicinski if (first_frag && IPCB(skb)->opt.optlen) {
84827a8caa5SJakub Kicinski /* ipcb->opt is not populated for frags
84927a8caa5SJakub Kicinski * coming from __ip_make_skb(),
85027a8caa5SJakub Kicinski * ip_options_fragment() needs optlen
85127a8caa5SJakub Kicinski */
85227a8caa5SJakub Kicinski IPCB(iter.frag)->opt.optlen =
85327a8caa5SJakub Kicinski IPCB(skb)->opt.optlen;
85427a8caa5SJakub Kicinski ip_options_fragment(iter.frag);
85527a8caa5SJakub Kicinski ip_send_check(iter.iph);
85627a8caa5SJakub Kicinski }
85719c3401aSPablo Neira Ayuso }
8581da177e4SLinus Torvalds
859a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(skb, tstamp, mono_delivery_time);
860694869b3SEric W. Biederman err = output(net, sk, skb);
8611da177e4SLinus Torvalds
862dafee490SWei Dong if (!err)
86326a949dbSEric W. Biederman IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
864c8b17be0SPablo Neira Ayuso if (err || !iter.frag)
8651da177e4SLinus Torvalds break;
8661da177e4SLinus Torvalds
867c8b17be0SPablo Neira Ayuso skb = ip_fraglist_next(&iter);
8681da177e4SLinus Torvalds }
8691da177e4SLinus Torvalds
8701da177e4SLinus Torvalds if (err == 0) {
87126a949dbSEric W. Biederman IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
8721da177e4SLinus Torvalds return 0;
8731da177e4SLinus Torvalds }
8741da177e4SLinus Torvalds
875b7034146SEric Dumazet kfree_skb_list(iter.frag);
876942f146aSPablo Neira Ayuso
87726a949dbSEric W. Biederman IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
8781da177e4SLinus Torvalds return err;
8793d13008eSEric Dumazet
8803d13008eSEric Dumazet slow_path_clean:
8813d13008eSEric Dumazet skb_walk_frags(skb, frag2) {
8823d13008eSEric Dumazet if (frag2 == frag)
8833d13008eSEric Dumazet break;
8843d13008eSEric Dumazet frag2->sk = NULL;
8853d13008eSEric Dumazet frag2->destructor = NULL;
8863d13008eSEric Dumazet skb->truesize += frag2->truesize;
8873d13008eSEric Dumazet }
8881da177e4SLinus Torvalds }
8891da177e4SLinus Torvalds
8901da177e4SLinus Torvalds slow_path:
8911da177e4SLinus Torvalds /*
8921da177e4SLinus Torvalds * Fragment the datagram.
8931da177e4SLinus Torvalds */
8941da177e4SLinus Torvalds
895e7a409c3SEric Dumazet ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
896e7a409c3SEric Dumazet &state);
8971da177e4SLinus Torvalds
8981da177e4SLinus Torvalds /*
8991da177e4SLinus Torvalds * Keep copying data until we run out.
9001da177e4SLinus Torvalds */
9011da177e4SLinus Torvalds
902065ff79fSPablo Neira Ayuso while (state.left > 0) {
90319c3401aSPablo Neira Ayuso bool first_frag = (state.offset == 0);
90419c3401aSPablo Neira Ayuso
905065ff79fSPablo Neira Ayuso skb2 = ip_frag_next(skb, &state);
906065ff79fSPablo Neira Ayuso if (IS_ERR(skb2)) {
907065ff79fSPablo Neira Ayuso err = PTR_ERR(skb2);
9081da177e4SLinus Torvalds goto fail;
9091da177e4SLinus Torvalds }
910faf482caSYajun Deng ip_frag_ipcb(skb, skb2, first_frag);
9111da177e4SLinus Torvalds
9121da177e4SLinus Torvalds /*
9131da177e4SLinus Torvalds * Put this fragment into the sending queue.
9141da177e4SLinus Torvalds */
915a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
916694869b3SEric W. Biederman err = output(net, sk, skb2);
9171da177e4SLinus Torvalds if (err)
9181da177e4SLinus Torvalds goto fail;
919dafee490SWei Dong
92026a949dbSEric W. Biederman IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
9211da177e4SLinus Torvalds }
9225d0ba55bSEric Dumazet consume_skb(skb);
92326a949dbSEric W. Biederman IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
9241da177e4SLinus Torvalds return err;
9251da177e4SLinus Torvalds
9261da177e4SLinus Torvalds fail:
9271da177e4SLinus Torvalds kfree_skb(skb);
92826a949dbSEric W. Biederman IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
9291da177e4SLinus Torvalds return err;
9301da177e4SLinus Torvalds }
93149d16b23SAndy Zhou EXPORT_SYMBOL(ip_do_fragment);
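/* Note: both ip_fragment() and ip_do_fragment() take an @output callback with
 * the (net, sk, skb) signature; in this file the fragmentation paths pass
 * ip_finish_output2(), so every generated fragment re-enters the normal
 * transmit path.
 */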
9322e2f7aefSPatrick McHardy
9331da177e4SLinus Torvalds int
9341da177e4SLinus Torvalds ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
9351da177e4SLinus Torvalds {
936f69e6d13SAl Viro struct msghdr *msg = from;
9371da177e4SLinus Torvalds
93884fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) {
9390b62fca2SAl Viro if (!copy_from_iter_full(to, len, &msg->msg_iter))
9401da177e4SLinus Torvalds return -EFAULT;
9411da177e4SLinus Torvalds } else {
94244bb9363SAl Viro __wsum csum = 0;
9430b62fca2SAl Viro if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
9441da177e4SLinus Torvalds return -EFAULT;
9451da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, csum, odd);
9461da177e4SLinus Torvalds }
9471da177e4SLinus Torvalds return 0;
9481da177e4SLinus Torvalds }
9494bc2f18bSEric Dumazet EXPORT_SYMBOL(ip_generic_getfrag);
9501da177e4SLinus Torvalds
951f5fca608SDavid S. Miller static int __ip_append_data(struct sock *sk,
952f5fca608SDavid S. Miller struct flowi4 *fl4,
953f5fca608SDavid S. Miller struct sk_buff_head *queue,
9541470ddf7SHerbert Xu struct inet_cork *cork,
9555640f768SEric Dumazet struct page_frag *pfrag,
9561470ddf7SHerbert Xu int getfrag(void *from, char *to, int offset,
9571470ddf7SHerbert Xu int len, int odd, struct sk_buff *skb),
9581da177e4SLinus Torvalds void *from, int length, int transhdrlen,
9591da177e4SLinus Torvalds unsigned int flags)
9601da177e4SLinus Torvalds {
9611da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk);
962b5947e5dSWillem de Bruijn struct ubuf_info *uarg = NULL;
9631da177e4SLinus Torvalds struct sk_buff *skb;
96407df5294SHerbert Xu struct ip_options *opt = cork->opt;
9651da177e4SLinus Torvalds int hh_len;
9661da177e4SLinus Torvalds int exthdrlen;
9671da177e4SLinus Torvalds int mtu;
9681da177e4SLinus Torvalds int copy;
9691da177e4SLinus Torvalds int err;
9701da177e4SLinus Torvalds int offset = 0;
9718eb77cc7SPavel Begunkov bool zc = false;
972daba287bSHannes Frederic Sowa unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
9731da177e4SLinus Torvalds int csummode = CHECKSUM_NONE;
9741470ddf7SHerbert Xu struct rtable *rt = (struct rtable *)cork->dst;
975*19a788bdSVadim Fedorenko bool paged, hold_tskey, extra_uref = false;
976694aba69SEric Dumazet unsigned int wmem_alloc_delta = 0;
97709c2d251SWillem de Bruijn u32 tskey = 0;
9781da177e4SLinus Torvalds
97996d7303eSSteffen Klassert skb = skb_peek_tail(queue);
98096d7303eSSteffen Klassert
98196d7303eSSteffen Klassert exthdrlen = !skb ? rt->dst.header_len : 0;
982bec1f6f6SWillem de Bruijn mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
98315e36f5bSWillem de Bruijn paged = !!cork->gso_size;
984bec1f6f6SWillem de Bruijn
985d8d1f30bSChangli Gao hh_len = LL_RESERVED_SPACE(rt->dst.dev);
9861da177e4SLinus Torvalds
9871da177e4SLinus Torvalds fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
9881da177e4SLinus Torvalds maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
989cbc08a33SMiaohe Lin maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
9901da177e4SLinus Torvalds
991daba287bSHannes Frederic Sowa if (cork->length + length > maxnonfragsize - fragheaderlen) {
992f5fca608SDavid S. Miller ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
99361e7f09dSHannes Frederic Sowa mtu - (opt ? opt->optlen : 0));
9941da177e4SLinus Torvalds return -EMSGSIZE;
9951da177e4SLinus Torvalds }
9961da177e4SLinus Torvalds
9971da177e4SLinus Torvalds /*
9981da177e4SLinus Torvalds * transhdrlen > 0 means that this is the first fragment and we wish
9991da177e4SLinus Torvalds * it won't be fragmented in the future.
10001da177e4SLinus Torvalds */
10011da177e4SLinus Torvalds if (transhdrlen &&
10021da177e4SLinus Torvalds length + fragheaderlen <= mtu &&
1003c8cd0989STom Herbert rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
1004bec1f6f6SWillem de Bruijn (!(flags & MSG_MORE) || cork->gso_size) &&
1005cd027a54SJacek Kalwas (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
100684fa7933SPatrick McHardy csummode = CHECKSUM_PARTIAL;
10071da177e4SLinus Torvalds
1008c445f31bSPavel Begunkov if ((flags & MSG_ZEROCOPY) && length) {
1009c445f31bSPavel Begunkov struct msghdr *msg = from;
1010c445f31bSPavel Begunkov
1011c445f31bSPavel Begunkov if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
1012c445f31bSPavel Begunkov if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
1013c445f31bSPavel Begunkov return -EINVAL;
1014c445f31bSPavel Begunkov
1015c445f31bSPavel Begunkov /* Leave uarg NULL if can't zerocopy, callers should
1016c445f31bSPavel Begunkov * be able to handle it.
1017c445f31bSPavel Begunkov */
1018c445f31bSPavel Begunkov if ((rt->dst.dev->features & NETIF_F_SG) &&
1019c445f31bSPavel Begunkov csummode == CHECKSUM_PARTIAL) {
1020c445f31bSPavel Begunkov paged = true;
1021c445f31bSPavel Begunkov zc = true;
1022c445f31bSPavel Begunkov uarg = msg->msg_ubuf;
1023c445f31bSPavel Begunkov }
1024c445f31bSPavel Begunkov } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
10258c793822SJonathan Lemon uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
1026b5947e5dSWillem de Bruijn if (!uarg)
1027b5947e5dSWillem de Bruijn return -ENOBUFS;
1028522924b5SWillem de Bruijn extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
1029b5947e5dSWillem de Bruijn if (rt->dst.dev->features & NETIF_F_SG &&
1030b5947e5dSWillem de Bruijn csummode == CHECKSUM_PARTIAL) {
1031b5947e5dSWillem de Bruijn paged = true;
10328eb77cc7SPavel Begunkov zc = true;
1033b5947e5dSWillem de Bruijn } else {
1034e7d2b510SPavel Begunkov uarg_to_msgzc(uarg)->zerocopy = 0;
103552900d22SWillem de Bruijn skb_zcopy_set(skb, uarg, &extra_uref);
1036b5947e5dSWillem de Bruijn }
1037b5947e5dSWillem de Bruijn }
10387da0dde6SDavid Howells } else if ((flags & MSG_SPLICE_PAGES) && length) {
1039cafbe182SEric Dumazet if (inet_test_bit(HDRINCL, sk))
10407da0dde6SDavid Howells return -EPERM;
10415a6f6873SDavid Howells if (rt->dst.dev->features & NETIF_F_SG &&
10425a6f6873SDavid Howells getfrag == ip_generic_getfrag)
10437da0dde6SDavid Howells /* We need an empty buffer to attach stuff to */
10447da0dde6SDavid Howells paged = true;
10457da0dde6SDavid Howells else
10467da0dde6SDavid Howells flags &= ~MSG_SPLICE_PAGES;
1047c445f31bSPavel Begunkov }
1048b5947e5dSWillem de Bruijn
10491470ddf7SHerbert Xu cork->length += length;
10501da177e4SLinus Torvalds
1051*19a788bdSVadim Fedorenko hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
1052*19a788bdSVadim Fedorenko READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
1053*19a788bdSVadim Fedorenko if (hold_tskey)
1054*19a788bdSVadim Fedorenko tskey = atomic_inc_return(&sk->sk_tskey) - 1;
1055*19a788bdSVadim Fedorenko
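/* With SOF_TIMESTAMPING_OPT_ID each datagram takes one value from the
 * per-socket sk_tskey counter here; the same value is reported back to
 * userspace together with the tx timestamp on the error queue, so
 * completions can be matched to individual sendmsg() calls.
 */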
10561da177e4SLinus Torvalds /* So, what's going on in the loop below?
10571da177e4SLinus Torvalds *
10581da177e4SLinus Torvalds * We use the calculated fragment length to generate a chain of skbs;
10591da177e4SLinus Torvalds * each segment is an IP fragment, ready to be sent to the network
10601da177e4SLinus Torvalds * once the appropriate IP header has been added.
10611da177e4SLinus Torvalds */
10621da177e4SLinus Torvalds
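/* Worked example, assuming the usual maxfraglen computation earlier in
 * this function (non-final fragment payload rounded down to a multiple
 * of 8 bytes): with mtu = 1006 and a plain 20 byte header the payload
 * is rounded from 986 down to 984, so maxfraglen = 1004.  Any bytes a
 * previous skb holds beyond maxfraglen (the "fraggap") are moved into
 * the next fragment below.
 */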
106326cde9f7SHerbert Xu if (!skb)
10641da177e4SLinus Torvalds goto alloc_new_skb;
10651da177e4SLinus Torvalds
10661da177e4SLinus Torvalds while (length > 0) {
10671da177e4SLinus Torvalds /* Check if the remaining data fits into current packet. */
10681da177e4SLinus Torvalds copy = mtu - skb->len;
10691da177e4SLinus Torvalds if (copy < length)
10701da177e4SLinus Torvalds copy = maxfraglen - skb->len;
10711da177e4SLinus Torvalds if (copy <= 0) {
10721da177e4SLinus Torvalds char *data;
10731da177e4SLinus Torvalds unsigned int datalen;
10741da177e4SLinus Torvalds unsigned int fraglen;
10751da177e4SLinus Torvalds unsigned int fraggap;
10766d123b81SJakub Kicinski unsigned int alloclen, alloc_extra;
1077aba36930SWillem de Bruijn unsigned int pagedlen;
10781da177e4SLinus Torvalds struct sk_buff *skb_prev;
10791da177e4SLinus Torvalds alloc_new_skb:
10801da177e4SLinus Torvalds skb_prev = skb;
10811da177e4SLinus Torvalds if (skb_prev)
10821da177e4SLinus Torvalds fraggap = skb_prev->len - maxfraglen;
10831da177e4SLinus Torvalds else
10841da177e4SLinus Torvalds fraggap = 0;
10851da177e4SLinus Torvalds
10861da177e4SLinus Torvalds /*
10871da177e4SLinus Torvalds * If remaining data exceeds the mtu,
10881da177e4SLinus Torvalds * we know we need more fragment(s).
10891da177e4SLinus Torvalds */
10901da177e4SLinus Torvalds datalen = length + fraggap;
10911da177e4SLinus Torvalds if (datalen > mtu - fragheaderlen)
10921da177e4SLinus Torvalds datalen = maxfraglen - fragheaderlen;
10931da177e4SLinus Torvalds fraglen = datalen + fragheaderlen;
1094aba36930SWillem de Bruijn pagedlen = 0;
10951da177e4SLinus Torvalds
10966d123b81SJakub Kicinski alloc_extra = hh_len + 15;
10976d123b81SJakub Kicinski alloc_extra += exthdrlen;
1098353e5c9aSSteffen Klassert
10991da177e4SLinus Torvalds /* The last fragment gets additional space at the tail.
11001da177e4SLinus Torvalds * Note, with MSG_MORE we overallocate on fragments,
11011da177e4SLinus Torvalds * because we have no idea which fragment will be
11021da177e4SLinus Torvalds * the last.
11031da177e4SLinus Torvalds */
110433f99dc7SSteffen Klassert if (datalen == length + fraggap)
11056d123b81SJakub Kicinski alloc_extra += rt->dst.trailer_len;
11066d123b81SJakub Kicinski
11076d123b81SJakub Kicinski if ((flags & MSG_MORE) &&
11086d123b81SJakub Kicinski !(rt->dst.dev->features&NETIF_F_SG))
11096d123b81SJakub Kicinski alloclen = mtu;
11106d123b81SJakub Kicinski else if (!paged &&
11116d123b81SJakub Kicinski (fraglen + alloc_extra < SKB_MAX_ALLOC ||
11126d123b81SJakub Kicinski !(rt->dst.dev->features & NETIF_F_SG)))
11136d123b81SJakub Kicinski alloclen = fraglen;
111447cf8899SPavel Begunkov else {
11158eb77cc7SPavel Begunkov alloclen = fragheaderlen + transhdrlen;
11168eb77cc7SPavel Begunkov pagedlen = datalen - transhdrlen;
11176d123b81SJakub Kicinski }
11186d123b81SJakub Kicinski
11196d123b81SJakub Kicinski alloclen += alloc_extra;
112033f99dc7SSteffen Klassert
11211da177e4SLinus Torvalds if (transhdrlen) {
11226d123b81SJakub Kicinski skb = sock_alloc_send_skb(sk, alloclen,
11231da177e4SLinus Torvalds (flags & MSG_DONTWAIT), &err);
11241da177e4SLinus Torvalds } else {
11251da177e4SLinus Torvalds skb = NULL;
1126694aba69SEric Dumazet if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
11271da177e4SLinus Torvalds 2 * sk->sk_sndbuf)
11286d123b81SJakub Kicinski skb = alloc_skb(alloclen,
11291da177e4SLinus Torvalds sk->sk_allocation);
113051456b29SIan Morris if (unlikely(!skb))
11311da177e4SLinus Torvalds err = -ENOBUFS;
11321da177e4SLinus Torvalds }
113351456b29SIan Morris if (!skb)
11341da177e4SLinus Torvalds goto error;
11351da177e4SLinus Torvalds
11361da177e4SLinus Torvalds /*
11371da177e4SLinus Torvalds * Fill in the control structures
11381da177e4SLinus Torvalds */
11391da177e4SLinus Torvalds skb->ip_summed = csummode;
11401da177e4SLinus Torvalds skb->csum = 0;
11411da177e4SLinus Torvalds skb_reserve(skb, hh_len);
114211878b40SWillem de Bruijn
11431da177e4SLinus Torvalds /*
11441da177e4SLinus Torvalds * Find where to start putting bytes.
11451da177e4SLinus Torvalds */
114615e36f5bSWillem de Bruijn data = skb_put(skb, fraglen + exthdrlen - pagedlen);
1147c14d2450SArnaldo Carvalho de Melo skb_set_network_header(skb, exthdrlen);
1148b0e380b1SArnaldo Carvalho de Melo skb->transport_header = (skb->network_header +
1149b0e380b1SArnaldo Carvalho de Melo fragheaderlen);
1150353e5c9aSSteffen Klassert data += fragheaderlen + exthdrlen;
11511da177e4SLinus Torvalds
11521da177e4SLinus Torvalds if (fraggap) {
11531da177e4SLinus Torvalds skb->csum = skb_copy_and_csum_bits(
11541da177e4SLinus Torvalds skb_prev, maxfraglen,
11558d5930dfSAl Viro data + transhdrlen, fraggap);
11561da177e4SLinus Torvalds skb_prev->csum = csum_sub(skb_prev->csum,
11571da177e4SLinus Torvalds skb->csum);
11581da177e4SLinus Torvalds data += fraggap;
1159e9fa4f7bSHerbert Xu pskb_trim_unique(skb_prev, maxfraglen);
11601da177e4SLinus Torvalds }
11611da177e4SLinus Torvalds
116215e36f5bSWillem de Bruijn copy = datalen - transhdrlen - fraggap - pagedlen;
11630f71c9caSDavid Howells /* [!] NOTE: copy will be negative if pagedlen>0
11640f71c9caSDavid Howells * because then the equation reduces to -fraggap.
11650f71c9caSDavid Howells */
11661da177e4SLinus Torvalds if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
11671da177e4SLinus Torvalds err = -EFAULT;
11681da177e4SLinus Torvalds kfree_skb(skb);
11691da177e4SLinus Torvalds goto error;
11700f71c9caSDavid Howells } else if (flags & MSG_SPLICE_PAGES) {
11710f71c9caSDavid Howells copy = 0;
11721da177e4SLinus Torvalds }
11731da177e4SLinus Torvalds
11741da177e4SLinus Torvalds offset += copy;
117515e36f5bSWillem de Bruijn length -= copy + transhdrlen;
11761da177e4SLinus Torvalds transhdrlen = 0;
11771da177e4SLinus Torvalds exthdrlen = 0;
11781da177e4SLinus Torvalds csummode = CHECKSUM_NONE;
11791da177e4SLinus Torvalds
118052900d22SWillem de Bruijn /* only the initial fragment is time stamped */
118152900d22SWillem de Bruijn skb_shinfo(skb)->tx_flags = cork->tx_flags;
118252900d22SWillem de Bruijn cork->tx_flags = 0;
118352900d22SWillem de Bruijn skb_shinfo(skb)->tskey = tskey;
118452900d22SWillem de Bruijn tskey = 0;
118552900d22SWillem de Bruijn skb_zcopy_set(skb, uarg, &extra_uref);
118652900d22SWillem de Bruijn
11870dec879fSJulian Anastasov if ((flags & MSG_CONFIRM) && !skb_prev)
11880dec879fSJulian Anastasov skb_set_dst_pending_confirm(skb, 1);
11890dec879fSJulian Anastasov
11901da177e4SLinus Torvalds /*
11911da177e4SLinus Torvalds * Put the packet on the pending queue.
11921da177e4SLinus Torvalds */
1193694aba69SEric Dumazet if (!skb->destructor) {
1194694aba69SEric Dumazet skb->destructor = sock_wfree;
1195694aba69SEric Dumazet skb->sk = sk;
1196694aba69SEric Dumazet wmem_alloc_delta += skb->truesize;
1197694aba69SEric Dumazet }
11981470ddf7SHerbert Xu __skb_queue_tail(queue, skb);
11991da177e4SLinus Torvalds continue;
12001da177e4SLinus Torvalds }
12011da177e4SLinus Torvalds
12021da177e4SLinus Torvalds if (copy > length)
12031da177e4SLinus Torvalds copy = length;
12041da177e4SLinus Torvalds
1205113f99c3SWillem de Bruijn if (!(rt->dst.dev->features&NETIF_F_SG) &&
1206113f99c3SWillem de Bruijn skb_tailroom(skb) >= copy) {
12071da177e4SLinus Torvalds unsigned int off;
12081da177e4SLinus Torvalds
12091da177e4SLinus Torvalds off = skb->len;
12101da177e4SLinus Torvalds if (getfrag(from, skb_put(skb, copy),
12111da177e4SLinus Torvalds offset, copy, off, skb) < 0) {
12121da177e4SLinus Torvalds __skb_trim(skb, off);
12131da177e4SLinus Torvalds err = -EFAULT;
12141da177e4SLinus Torvalds goto error;
12151da177e4SLinus Torvalds }
12167da0dde6SDavid Howells } else if (flags & MSG_SPLICE_PAGES) {
12177da0dde6SDavid Howells struct msghdr *msg = from;
12187da0dde6SDavid Howells
12190f71c9caSDavid Howells err = -EIO;
12200f71c9caSDavid Howells if (WARN_ON_ONCE(copy > msg->msg_iter.count))
12210f71c9caSDavid Howells goto error;
12220f71c9caSDavid Howells
12237da0dde6SDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
12247da0dde6SDavid Howells sk->sk_allocation);
12257da0dde6SDavid Howells if (err < 0)
12267da0dde6SDavid Howells goto error;
12277da0dde6SDavid Howells copy = err;
12287da0dde6SDavid Howells wmem_alloc_delta += copy;
1229c445f31bSPavel Begunkov } else if (!zc) {
12301da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags;
12311da177e4SLinus Torvalds
12321da177e4SLinus Torvalds err = -ENOMEM;
12335640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag))
12341da177e4SLinus Torvalds goto error;
12351da177e4SLinus Torvalds
1236c445f31bSPavel Begunkov skb_zcopy_downgrade_managed(skb);
12375640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page,
12385640f768SEric Dumazet pfrag->offset)) {
12391da177e4SLinus Torvalds err = -EMSGSIZE;
12405640f768SEric Dumazet if (i == MAX_SKB_FRAGS)
12411da177e4SLinus Torvalds goto error;
12425640f768SEric Dumazet
12435640f768SEric Dumazet __skb_fill_page_desc(skb, i, pfrag->page,
12445640f768SEric Dumazet pfrag->offset, 0);
12455640f768SEric Dumazet skb_shinfo(skb)->nr_frags = ++i;
12465640f768SEric Dumazet get_page(pfrag->page);
12471da177e4SLinus Torvalds }
12485640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset);
12495640f768SEric Dumazet if (getfrag(from,
12505640f768SEric Dumazet page_address(pfrag->page) + pfrag->offset,
12515640f768SEric Dumazet offset, copy, skb->len, skb) < 0)
12525640f768SEric Dumazet goto error_efault;
12535640f768SEric Dumazet
12545640f768SEric Dumazet pfrag->offset += copy;
12555640f768SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1256ede57d58SRichard Gobert skb_len_add(skb, copy);
1257694aba69SEric Dumazet wmem_alloc_delta += copy;
1258b5947e5dSWillem de Bruijn } else {
1259b5947e5dSWillem de Bruijn err = skb_zerocopy_iter_dgram(skb, from, copy);
1260b5947e5dSWillem de Bruijn if (err < 0)
1261b5947e5dSWillem de Bruijn goto error;
12621da177e4SLinus Torvalds }
12631da177e4SLinus Torvalds offset += copy;
12641da177e4SLinus Torvalds length -= copy;
12651da177e4SLinus Torvalds }
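/* Each pass of the loop above grew the current skb by one of four paths:
 * copying into linear tailroom (devices without scatter/gather),
 * splicing caller pages in for MSG_SPLICE_PAGES, copying into the
 * per-socket page fragment, or attaching user pages directly for
 * zerocopy.
 */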
12661da177e4SLinus Torvalds
12679e8445a5SPaolo Abeni if (wmem_alloc_delta)
1268694aba69SEric Dumazet refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
12691da177e4SLinus Torvalds return 0;
12701da177e4SLinus Torvalds
12715640f768SEric Dumazet error_efault:
12725640f768SEric Dumazet err = -EFAULT;
12731da177e4SLinus Torvalds error:
12748e044917SJonathan Lemon net_zcopy_put_abort(uarg, extra_uref);
12751470ddf7SHerbert Xu cork->length -= length;
12765e38e270SPavel Emelyanov IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1277694aba69SEric Dumazet refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1278*19a788bdSVadim Fedorenko if (hold_tskey)
1279*19a788bdSVadim Fedorenko atomic_dec(&sk->sk_tskey);
12801da177e4SLinus Torvalds return err;
12811da177e4SLinus Torvalds }
12821da177e4SLinus Torvalds
12831470ddf7SHerbert Xu static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
12841470ddf7SHerbert Xu struct ipcm_cookie *ipc, struct rtable **rtp)
12851470ddf7SHerbert Xu {
1286f6d8bd05SEric Dumazet struct ip_options_rcu *opt;
12871470ddf7SHerbert Xu struct rtable *rt;
12881470ddf7SHerbert Xu
12899783ccd0SGao Feng rt = *rtp;
12909783ccd0SGao Feng if (unlikely(!rt))
12919783ccd0SGao Feng return -EFAULT;
12929783ccd0SGao Feng
12938160eb9aSZhipeng Lu cork->fragsize = ip_sk_use_pmtu(sk) ?
12948160eb9aSZhipeng Lu dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
12958160eb9aSZhipeng Lu
12968160eb9aSZhipeng Lu if (!inetdev_valid_mtu(cork->fragsize))
12978160eb9aSZhipeng Lu return -ENETUNREACH;
12988160eb9aSZhipeng Lu
12991470ddf7SHerbert Xu /*
13001470ddf7SHerbert Xu * setup for corking.
13011470ddf7SHerbert Xu */
13021470ddf7SHerbert Xu opt = ipc->opt;
13031470ddf7SHerbert Xu if (opt) {
130451456b29SIan Morris if (!cork->opt) {
13051470ddf7SHerbert Xu cork->opt = kmalloc(sizeof(struct ip_options) + 40,
13061470ddf7SHerbert Xu sk->sk_allocation);
130751456b29SIan Morris if (unlikely(!cork->opt))
13081470ddf7SHerbert Xu return -ENOBUFS;
13091470ddf7SHerbert Xu }
1310f6d8bd05SEric Dumazet memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
13111470ddf7SHerbert Xu cork->flags |= IPCORK_OPT;
13121470ddf7SHerbert Xu cork->addr = ipc->addr;
13131470ddf7SHerbert Xu }
13149783ccd0SGao Feng
1315fbf47813SWillem de Bruijn cork->gso_size = ipc->gso_size;
1316501a90c9SEric Dumazet
13171470ddf7SHerbert Xu cork->dst = &rt->dst;
1318501a90c9SEric Dumazet /* We stole this route, caller should not release it. */
1319501a90c9SEric Dumazet *rtp = NULL;
1320501a90c9SEric Dumazet
13211470ddf7SHerbert Xu cork->length = 0;
1322aa661581SFrancesco Fusco cork->ttl = ipc->ttl;
1323aa661581SFrancesco Fusco cork->tos = ipc->tos;
1324c6af0c22SWillem de Bruijn cork->mark = ipc->sockc.mark;
1325aa661581SFrancesco Fusco cork->priority = ipc->priority;
1326bc969a97SJesus Sanchez-Palencia cork->transmit_time = ipc->sockc.transmit_time;
1327678ca42dSWillem de Bruijn cork->tx_flags = 0;
1328678ca42dSWillem de Bruijn sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
13291470ddf7SHerbert Xu
13301470ddf7SHerbert Xu return 0;
13311470ddf7SHerbert Xu }
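/* The cork now pins everything a multi-call datagram needs - the route,
 * IP options, gso size, ttl/tos/mark/priority and tx timestamp flags -
 * so later ip_append_data() calls and the final __ip_make_skb() see one
 * consistent view of them.
 */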
13321470ddf7SHerbert Xu
13331470ddf7SHerbert Xu /*
1334c49cf266SDavid Howells * ip_append_data() can make one large IP datagram from many pieces of
1335c49cf266SDavid Howells * data. Each piece will be held on the socket until
1336c49cf266SDavid Howells * ip_push_pending_frames() is called. Each piece can be a page or
1337c49cf266SDavid Howells * non-page data.
13381470ddf7SHerbert Xu *
13391470ddf7SHerbert Xu * Not only UDP but also other transport protocols - e.g. raw sockets -
13401470ddf7SHerbert Xu * can potentially use this interface.
13411470ddf7SHerbert Xu *
13421470ddf7SHerbert Xu * LATER: length must be adjusted by pad at tail, when it is required.
13431470ddf7SHerbert Xu */
1344f5fca608SDavid S. Miller int ip_append_data(struct sock *sk, struct flowi4 *fl4,
13451470ddf7SHerbert Xu int getfrag(void *from, char *to, int offset, int len,
13461470ddf7SHerbert Xu int odd, struct sk_buff *skb),
13471470ddf7SHerbert Xu void *from, int length, int transhdrlen,
13481470ddf7SHerbert Xu struct ipcm_cookie *ipc, struct rtable **rtp,
13491470ddf7SHerbert Xu unsigned int flags)
13501470ddf7SHerbert Xu {
13511470ddf7SHerbert Xu struct inet_sock *inet = inet_sk(sk);
13521470ddf7SHerbert Xu int err;
13531470ddf7SHerbert Xu
13541470ddf7SHerbert Xu if (flags&MSG_PROBE)
13551470ddf7SHerbert Xu return 0;
13561470ddf7SHerbert Xu
13571470ddf7SHerbert Xu if (skb_queue_empty(&sk->sk_write_queue)) {
1358bdc712b4SDavid S. Miller err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
13591470ddf7SHerbert Xu if (err)
13601470ddf7SHerbert Xu return err;
13611470ddf7SHerbert Xu } else {
13621470ddf7SHerbert Xu transhdrlen = 0;
13631470ddf7SHerbert Xu }
13641470ddf7SHerbert Xu
13655640f768SEric Dumazet return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
13665640f768SEric Dumazet sk_page_frag(sk), getfrag,
13671470ddf7SHerbert Xu from, length, transhdrlen, flags);
13681470ddf7SHerbert Xu }
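/* Illustrative calling pattern, modelled loosely on a UDP-style sender
 * (the exact arguments are assumptions, not copied from a real caller):
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg, len,
 *			     sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, &fl4);
 *	release_sock(sk);
 */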
13691470ddf7SHerbert Xu
13701470ddf7SHerbert Xu static void ip_cork_release(struct inet_cork *cork)
1371429f08e9SPavel Emelyanov {
13721470ddf7SHerbert Xu cork->flags &= ~IPCORK_OPT;
13731470ddf7SHerbert Xu kfree(cork->opt);
13741470ddf7SHerbert Xu cork->opt = NULL;
13751470ddf7SHerbert Xu dst_release(cork->dst);
13761470ddf7SHerbert Xu cork->dst = NULL;
1377429f08e9SPavel Emelyanov }
1378429f08e9SPavel Emelyanov
13791da177e4SLinus Torvalds /*
13801da177e4SLinus Torvalds * Combine all pending IP fragments on the socket into one IP datagram
13811da177e4SLinus Torvalds * and push it out.
13821da177e4SLinus Torvalds */
13831c32c5adSHerbert Xu struct sk_buff *__ip_make_skb(struct sock *sk,
138477968b78SDavid S. Miller struct flowi4 *fl4,
13851470ddf7SHerbert Xu struct sk_buff_head *queue,
13861470ddf7SHerbert Xu struct inet_cork *cork)
13871da177e4SLinus Torvalds {
13881da177e4SLinus Torvalds struct sk_buff *skb, *tmp_skb;
13891da177e4SLinus Torvalds struct sk_buff **tail_skb;
13901da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk);
13910388b004SPavel Emelyanov struct net *net = sock_net(sk);
13921da177e4SLinus Torvalds struct ip_options *opt = NULL;
13931470ddf7SHerbert Xu struct rtable *rt = (struct rtable *)cork->dst;
13941da177e4SLinus Torvalds struct iphdr *iph;
139576ab608dSAlexey Dobriyan __be16 df = 0;
13961da177e4SLinus Torvalds __u8 ttl;
13971da177e4SLinus Torvalds
139851456b29SIan Morris skb = __skb_dequeue(queue);
139951456b29SIan Morris if (!skb)
14001da177e4SLinus Torvalds goto out;
14011da177e4SLinus Torvalds tail_skb = &(skb_shinfo(skb)->frag_list);
14021da177e4SLinus Torvalds
14031da177e4SLinus Torvalds /* move skb->data to ip header from ext header */
1404d56f90a7SArnaldo Carvalho de Melo if (skb->data < skb_network_header(skb))
1405bbe735e4SArnaldo Carvalho de Melo __skb_pull(skb, skb_network_offset(skb));
14061470ddf7SHerbert Xu while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1407cfe1fc77SArnaldo Carvalho de Melo __skb_pull(tmp_skb, skb_network_header_len(skb));
14081da177e4SLinus Torvalds *tail_skb = tmp_skb;
14091da177e4SLinus Torvalds tail_skb = &(tmp_skb->next);
14101da177e4SLinus Torvalds skb->len += tmp_skb->len;
14111da177e4SLinus Torvalds skb->data_len += tmp_skb->len;
14121da177e4SLinus Torvalds skb->truesize += tmp_skb->truesize;
14131da177e4SLinus Torvalds tmp_skb->destructor = NULL;
14141da177e4SLinus Torvalds tmp_skb->sk = NULL;
14151da177e4SLinus Torvalds }
14161da177e4SLinus Torvalds
14171da177e4SLinus Torvalds /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
14181da177e4SLinus Torvalds * the frame generated here to be fragmented. No matter how transforms
14191da177e4SLinus Torvalds * change the size of the packet, it will come out.
14201da177e4SLinus Torvalds */
142160ff7467SWANG Cong skb->ignore_df = ip_sk_ignore_df(sk);
14221da177e4SLinus Torvalds
14231da177e4SLinus Torvalds /* DF bit is set when we want to see DF on outgoing frames.
142460ff7467SWANG Cong * If ignore_df is set too, we still allow this frame to be
14251da177e4SLinus Torvalds * fragmented locally. */
1426482fc609SHannes Frederic Sowa if (inet->pmtudisc == IP_PMTUDISC_DO ||
1427482fc609SHannes Frederic Sowa inet->pmtudisc == IP_PMTUDISC_PROBE ||
1428d8d1f30bSChangli Gao (skb->len <= dst_mtu(&rt->dst) &&
1429d8d1f30bSChangli Gao ip_dont_fragment(sk, &rt->dst)))
14301da177e4SLinus Torvalds df = htons(IP_DF);
14311da177e4SLinus Torvalds
14321470ddf7SHerbert Xu if (cork->flags & IPCORK_OPT)
14331470ddf7SHerbert Xu opt = cork->opt;
14341da177e4SLinus Torvalds
1435aa661581SFrancesco Fusco if (cork->ttl != 0)
1436aa661581SFrancesco Fusco ttl = cork->ttl;
1437aa661581SFrancesco Fusco else if (rt->rt_type == RTN_MULTICAST)
14381da177e4SLinus Torvalds ttl = inet->mc_ttl;
14391da177e4SLinus Torvalds else
1440d8d1f30bSChangli Gao ttl = ip_select_ttl(inet, &rt->dst);
14411da177e4SLinus Torvalds
1442749154aaSAnsis Atteka iph = ip_hdr(skb);
14431da177e4SLinus Torvalds iph->version = 4;
14441da177e4SLinus Torvalds iph->ihl = 5;
1445aa661581SFrancesco Fusco iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
14461da177e4SLinus Torvalds iph->frag_off = df;
14471da177e4SLinus Torvalds iph->ttl = ttl;
14481da177e4SLinus Torvalds iph->protocol = sk->sk_protocol;
144984f9307cSEric Dumazet ip_copy_addrs(iph, fl4);
1450b6a7719aSHannes Frederic Sowa ip_select_ident(net, skb, sk);
14511da177e4SLinus Torvalds
145222f728f8SDavid S. Miller if (opt) {
145322f728f8SDavid S. Miller iph->ihl += opt->optlen >> 2;
14544f0e3040SJakub Kicinski ip_options_build(skb, opt, cork->addr, rt);
145522f728f8SDavid S. Miller }
145622f728f8SDavid S. Miller
1457aa661581SFrancesco Fusco skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
1458c6af0c22SWillem de Bruijn skb->mark = cork->mark;
1459bc969a97SJesus Sanchez-Palencia skb->tstamp = cork->transmit_time;
1460a21bba94SEric Dumazet /*
1461a21bba94SEric Dumazet * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1462a21bba94SEric Dumazet * on dst refcount
1463a21bba94SEric Dumazet */
14641470ddf7SHerbert Xu cork->dst = NULL;
1465d8d1f30bSChangli Gao skb_dst_set(skb, &rt->dst);
14661da177e4SLinus Torvalds
146799e5acaeSZiyang Xuan if (iph->protocol == IPPROTO_ICMP) {
146899e5acaeSZiyang Xuan u8 icmp_type;
146999e5acaeSZiyang Xuan
147099e5acaeSZiyang Xuan /* For such sockets, transhdrlen is zero when ip_append_data() is
147199e5acaeSZiyang Xuan * called, so the icmphdr is not in the skb linear region and the
147299e5acaeSZiyang Xuan * icmp type cannot be read via icmp_hdr(skb)->type.
147399e5acaeSZiyang Xuan */
1474cafbe182SEric Dumazet if (sk->sk_type == SOCK_RAW &&
14755db08343SShigeru Yoshida !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
147699e5acaeSZiyang Xuan icmp_type = fl4->fl4_icmp_type;
147799e5acaeSZiyang Xuan else
147899e5acaeSZiyang Xuan icmp_type = icmp_hdr(skb)->type;
147999e5acaeSZiyang Xuan icmp_out_count(net, icmp_type);
148099e5acaeSZiyang Xuan }
148196793b48SDavid L Stevens
14821c32c5adSHerbert Xu ip_cork_release(cork);
14831c32c5adSHerbert Xu out:
14841c32c5adSHerbert Xu return skb;
14851c32c5adSHerbert Xu }
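/* At this point the per-fragment skbs queued by __ip_append_data() have
 * been collapsed into one skb: the first carries the IP header and the
 * rest hang off skb_shinfo(skb)->frag_list, ready for the output path
 * to split them again should the datagram exceed the path MTU.
 */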
14861c32c5adSHerbert Xu
1487b5ec8eeaSEric Dumazet int ip_send_skb(struct net *net, struct sk_buff *skb)
14881c32c5adSHerbert Xu {
14891c32c5adSHerbert Xu int err;
14901c32c5adSHerbert Xu
149133224b16SEric W. Biederman err = ip_local_out(net, skb->sk, skb);
14921da177e4SLinus Torvalds if (err) {
14931da177e4SLinus Torvalds if (err > 0)
14946ce9e7b5SEric Dumazet err = net_xmit_errno(err);
14951da177e4SLinus Torvalds if (err)
14961c32c5adSHerbert Xu IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
14971da177e4SLinus Torvalds }
14981da177e4SLinus Torvalds
14991da177e4SLinus Torvalds return err;
15001da177e4SLinus Torvalds }
15011da177e4SLinus Torvalds
150277968b78SDavid S. Miller int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
15031470ddf7SHerbert Xu {
15041c32c5adSHerbert Xu struct sk_buff *skb;
15051c32c5adSHerbert Xu
150677968b78SDavid S. Miller skb = ip_finish_skb(sk, fl4);
15071c32c5adSHerbert Xu if (!skb)
15081c32c5adSHerbert Xu return 0;
15091c32c5adSHerbert Xu
15101c32c5adSHerbert Xu /* Netfilter gets the whole, unfragmented skb. */
1511b5ec8eeaSEric Dumazet return ip_send_skb(sock_net(sk), skb);
15121470ddf7SHerbert Xu }
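/* ip_finish_skb() (a header helper, not defined in this file) boils down
 * to __ip_make_skb() on sk->sk_write_queue with the socket's base cork,
 * so pushing is simply "finish the pending skb, then ip_send_skb() it".
 */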
15131470ddf7SHerbert Xu
15141da177e4SLinus Torvalds /*
15151da177e4SLinus Torvalds * Throw away all pending data on the socket.
15161da177e4SLinus Torvalds */
15171470ddf7SHerbert Xu static void __ip_flush_pending_frames(struct sock *sk,
15181470ddf7SHerbert Xu struct sk_buff_head *queue,
15191470ddf7SHerbert Xu struct inet_cork *cork)
15201da177e4SLinus Torvalds {
15211da177e4SLinus Torvalds struct sk_buff *skb;
15221da177e4SLinus Torvalds
15231470ddf7SHerbert Xu while ((skb = __skb_dequeue_tail(queue)) != NULL)
15241da177e4SLinus Torvalds kfree_skb(skb);
15251da177e4SLinus Torvalds
15261470ddf7SHerbert Xu ip_cork_release(cork);
15271470ddf7SHerbert Xu }
15281470ddf7SHerbert Xu
15291470ddf7SHerbert Xu void ip_flush_pending_frames(struct sock *sk)
15301470ddf7SHerbert Xu {
1531bdc712b4SDavid S. Miller __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
15321da177e4SLinus Torvalds }
15331da177e4SLinus Torvalds
15341c32c5adSHerbert Xu struct sk_buff *ip_make_skb(struct sock *sk,
153577968b78SDavid S. Miller struct flowi4 *fl4,
15361c32c5adSHerbert Xu int getfrag(void *from, char *to, int offset,
15371c32c5adSHerbert Xu int len, int odd, struct sk_buff *skb),
15381c32c5adSHerbert Xu void *from, int length, int transhdrlen,
15391c32c5adSHerbert Xu struct ipcm_cookie *ipc, struct rtable **rtp,
15401cd7884dSWillem de Bruijn struct inet_cork *cork, unsigned int flags)
15411c32c5adSHerbert Xu {
15421c32c5adSHerbert Xu struct sk_buff_head queue;
15431c32c5adSHerbert Xu int err;
15441c32c5adSHerbert Xu
15451c32c5adSHerbert Xu if (flags & MSG_PROBE)
15461c32c5adSHerbert Xu return NULL;
15471c32c5adSHerbert Xu
15481c32c5adSHerbert Xu __skb_queue_head_init(&queue);
15491c32c5adSHerbert Xu
15501cd7884dSWillem de Bruijn cork->flags = 0;
15511cd7884dSWillem de Bruijn cork->addr = 0;
15521cd7884dSWillem de Bruijn cork->opt = NULL;
15531cd7884dSWillem de Bruijn err = ip_setup_cork(sk, cork, ipc, rtp);
15541c32c5adSHerbert Xu if (err)
15551c32c5adSHerbert Xu return ERR_PTR(err);
15561c32c5adSHerbert Xu
15571cd7884dSWillem de Bruijn err = __ip_append_data(sk, fl4, &queue, cork,
15585640f768SEric Dumazet &current->task_frag, getfrag,
15591c32c5adSHerbert Xu from, length, transhdrlen, flags);
15601c32c5adSHerbert Xu if (err) {
15611cd7884dSWillem de Bruijn __ip_flush_pending_frames(sk, &queue, cork);
15621c32c5adSHerbert Xu return ERR_PTR(err);
15631c32c5adSHerbert Xu }
15641c32c5adSHerbert Xu
15651cd7884dSWillem de Bruijn return __ip_make_skb(sk, fl4, &queue, cork);
15661c32c5adSHerbert Xu }
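/* Unlike ip_append_data(), which parks fragments on sk->sk_write_queue
 * until they are pushed, ip_make_skb() builds the whole datagram on a
 * private queue and cork, so nothing is left pending on the socket; a
 * non-corked datagram sender gets back a finished skb to transmit with
 * ip_send_skb() once its own transport header has been filled in.
 */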
15671da177e4SLinus Torvalds
15681da177e4SLinus Torvalds /*
15691da177e4SLinus Torvalds * Fetch data from kernel space and fill in checksum if needed.
15701da177e4SLinus Torvalds */
15711da177e4SLinus Torvalds static int ip_reply_glue_bits(void *dptr, char *to, int offset,
15721da177e4SLinus Torvalds int len, int odd, struct sk_buff *skb)
15731da177e4SLinus Torvalds {
15745084205fSAl Viro __wsum csum;
15751da177e4SLinus Torvalds
1576cc44c17bSAl Viro csum = csum_partial_copy_nocheck(dptr+offset, to, len);
15771da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, csum, odd);
15781da177e4SLinus Torvalds return 0;
15791da177e4SLinus Torvalds }
15801da177e4SLinus Torvalds
15811da177e4SLinus Torvalds /*
15821da177e4SLinus Torvalds * Generic function to send a packet as a reply to another packet.
1583be9f4a44SEric Dumazet * Used so far to send some TCP resets/acks.
15841da177e4SLinus Torvalds */
1585bdbbb852SEric Dumazet void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
158624a2d43dSEric Dumazet const struct ip_options *sopt,
158724a2d43dSEric Dumazet __be32 daddr, __be32 saddr,
158824a2d43dSEric Dumazet const struct ip_reply_arg *arg,
1589c0a8966eSAntoine Tenart unsigned int len, u64 transmit_time, u32 txhash)
15901da177e4SLinus Torvalds {
1591f6d8bd05SEric Dumazet struct ip_options_data replyopts;
15921da177e4SLinus Torvalds struct ipcm_cookie ipc;
159377968b78SDavid S. Miller struct flowi4 fl4;
1594511c3f92SEric Dumazet struct rtable *rt = skb_rtable(skb);
1595bdbbb852SEric Dumazet struct net *net = sock_net(sk);
1596be9f4a44SEric Dumazet struct sk_buff *nskb;
15974062090eSVasily Averin int err;
1598f7ba868bSDavid Ahern int oif;
15991da177e4SLinus Torvalds
160091ed1e66SPaolo Abeni if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
16011da177e4SLinus Torvalds return;
16021da177e4SLinus Torvalds
160335178206SWillem de Bruijn ipcm_init(&ipc);
16040a5ebb80SDavid S. Miller ipc.addr = daddr;
1605d6fb396cSEric Dumazet ipc.sockc.transmit_time = transmit_time;
16061da177e4SLinus Torvalds
1607f6d8bd05SEric Dumazet if (replyopts.opt.opt.optlen) {
16081da177e4SLinus Torvalds ipc.opt = &replyopts.opt;
16091da177e4SLinus Torvalds
1610f6d8bd05SEric Dumazet if (replyopts.opt.opt.srr)
1611f6d8bd05SEric Dumazet daddr = replyopts.opt.opt.faddr;
16121da177e4SLinus Torvalds }
16131da177e4SLinus Torvalds
1614f7ba868bSDavid Ahern oif = arg->bound_dev_if;
16159b6c14d5SDavid Ahern if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
16169b6c14d5SDavid Ahern oif = skb->skb_iif;
1617f7ba868bSDavid Ahern
1618f7ba868bSDavid Ahern flowi4_init_output(&fl4, oif,
161900483690SJon Maxwell IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
162066b13d99SEric Dumazet RT_TOS(arg->tos),
1621be9f4a44SEric Dumazet RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1622538de0e0SDavid S. Miller ip_reply_arg_flowi_flags(arg),
162370e73416SDavid S. Miller daddr, saddr,
1624e2d118a1SLorenzo Colitti tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1625e2d118a1SLorenzo Colitti arg->uid);
16263df98d79SPaul Moore security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
1627e22aa148Ssewookseo rt = ip_route_output_flow(net, &fl4, sk);
1628b23dd4feSDavid S. Miller if (IS_ERR(rt))
16291da177e4SLinus Torvalds return;
16301da177e4SLinus Torvalds
1631ba9e04a7SWei Wang inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
16321da177e4SLinus Torvalds
1633eddc9ec5SArnaldo Carvalho de Melo sk->sk_protocol = ip_hdr(skb)->protocol;
1634f0e48dbfSPatrick McHardy sk->sk_bound_dev_if = arg->bound_dev_if;
16351227c177SKuniyuki Iwashima sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
16360da7536fSWillem de Bruijn ipc.sockc.mark = fl4.flowi4_mark;
16374062090eSVasily Averin err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
16384062090eSVasily Averin len, 0, &ipc, &rt, MSG_DONTWAIT);
16394062090eSVasily Averin if (unlikely(err)) {
16404062090eSVasily Averin ip_flush_pending_frames(sk);
16414062090eSVasily Averin goto out;
16424062090eSVasily Averin }
16434062090eSVasily Averin
1644be9f4a44SEric Dumazet nskb = skb_peek(&sk->sk_write_queue);
1645be9f4a44SEric Dumazet if (nskb) {
16461da177e4SLinus Torvalds if (arg->csumoffset >= 0)
1647be9f4a44SEric Dumazet *((__sum16 *)skb_transport_header(nskb) +
1648be9f4a44SEric Dumazet arg->csumoffset) = csum_fold(csum_add(nskb->csum,
16499c70220bSArnaldo Carvalho de Melo arg->csum));
1650be9f4a44SEric Dumazet nskb->ip_summed = CHECKSUM_NONE;
1651d98d58a0SMartin KaFai Lau nskb->mono_delivery_time = !!transmit_time;
1652c0a8966eSAntoine Tenart if (txhash)
1653c0a8966eSAntoine Tenart skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
165477968b78SDavid S. Miller ip_push_pending_frames(sk, &fl4);
16551da177e4SLinus Torvalds }
16564062090eSVasily Averin out:
16571da177e4SLinus Torvalds ip_rt_put(rt);
16581da177e4SLinus Torvalds }
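/* Sketch of a typical caller, e.g. the TCP reset/ack path (the argument
 * details are illustrative assumptions, not copied from tcp_ipv4.c):
 *
 *	arg.iov[0].iov_base = &rep.th;
 *	arg.iov[0].iov_len  = sizeof(rep.th);
 *	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 *				      ip_hdr(skb)->saddr,
 *				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 *	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *	ip_send_unicast_reply(ctl_sk, skb, &opts, ip_hdr(skb)->saddr,
 *			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len,
 *			      transmit_time, txhash);
 */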
16591da177e4SLinus Torvalds
16601da177e4SLinus Torvalds void __init ip_init(void)
16611da177e4SLinus Torvalds {
16621da177e4SLinus Torvalds ip_rt_init();
16631da177e4SLinus Torvalds inet_initpeers();
16641da177e4SLinus Torvalds
166572c1d3bdSWANG Cong #if defined(CONFIG_IP_MULTICAST)
166672c1d3bdSWANG Cong igmp_mc_init();
16671da177e4SLinus Torvalds #endif
16681da177e4SLinus Torvalds }
1669