xref: /openbmc/linux/net/ipv4/ip_output.c (revision 3c5b4d69c358a9275a8de98f87caf6eda644b086)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
41da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
51da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  *		The Internet Protocol (IP) output module.
81da177e4SLinus Torvalds  *
902c30a84SJesper Juhl  * Authors:	Ross Biro
101da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
111da177e4SLinus Torvalds  *		Donald Becker, <becker@super.org>
121da177e4SLinus Torvalds  *		Alan Cox, <Alan.Cox@linux.org>
131da177e4SLinus Torvalds  *		Richard Underwood
141da177e4SLinus Torvalds  *		Stefan Becker, <stefanb@yello.ping.de>
151da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
161da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
171da177e4SLinus Torvalds  *		Hirokazu Takahashi, <taka@valinux.co.jp>
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  *	See ip_input.c for original log
201da177e4SLinus Torvalds  *
211da177e4SLinus Torvalds  *	Fixes:
221da177e4SLinus Torvalds  *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
231da177e4SLinus Torvalds  *		Mike Kilburn	:	htons() missing in ip_build_xmit.
241da177e4SLinus Torvalds  *		Bradford Johnson:	Fix faulty handling of some frames when
251da177e4SLinus Torvalds  *					no route is found.
261da177e4SLinus Torvalds  *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
271da177e4SLinus Torvalds  *					(in case the packet is not accepted
281da177e4SLinus Torvalds  *					by the output firewall rules)
291da177e4SLinus Torvalds  *		Mike McLagan	:	Routing by source
301da177e4SLinus Torvalds  *		Alexey Kuznetsov:	use new route cache
311da177e4SLinus Torvalds  *		Andi Kleen:		Fix broken PMTU recovery and remove
321da177e4SLinus Torvalds  *					some redundant tests.
331da177e4SLinus Torvalds  *	Vitaly E. Lavrov	:	Transparent proxy revived after a year-long coma.
341da177e4SLinus Torvalds  *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
351da177e4SLinus Torvalds  *		Andi Kleen	:	Split fast and slow ip_build_xmit path
361da177e4SLinus Torvalds  *					for decreased register pressure on x86
37a66e04ceSBhaskar Chowdhury  *					and better readability.
381da177e4SLinus Torvalds  *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
391da177e4SLinus Torvalds  *					silently drop skb instead of failing with -EPERM.
401da177e4SLinus Torvalds  *		Detlev Wengorz	:	Copy protocol for fragments.
411da177e4SLinus Torvalds  *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
421da177e4SLinus Torvalds  *					datagrams.
431da177e4SLinus Torvalds  *		Hirokazu Takahashi:	sendfile() on UDP works now.
441da177e4SLinus Torvalds  */
451da177e4SLinus Torvalds 
467c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
471da177e4SLinus Torvalds #include <linux/module.h>
481da177e4SLinus Torvalds #include <linux/types.h>
491da177e4SLinus Torvalds #include <linux/kernel.h>
501da177e4SLinus Torvalds #include <linux/mm.h>
511da177e4SLinus Torvalds #include <linux/string.h>
521da177e4SLinus Torvalds #include <linux/errno.h>
53a1f8e7f7SAl Viro #include <linux/highmem.h>
545a0e3ad6STejun Heo #include <linux/slab.h>
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds #include <linux/socket.h>
571da177e4SLinus Torvalds #include <linux/sockios.h>
581da177e4SLinus Torvalds #include <linux/in.h>
591da177e4SLinus Torvalds #include <linux/inet.h>
601da177e4SLinus Torvalds #include <linux/netdevice.h>
611da177e4SLinus Torvalds #include <linux/etherdevice.h>
621da177e4SLinus Torvalds #include <linux/proc_fs.h>
631da177e4SLinus Torvalds #include <linux/stat.h>
641da177e4SLinus Torvalds #include <linux/init.h>
651da177e4SLinus Torvalds 
661da177e4SLinus Torvalds #include <net/snmp.h>
671da177e4SLinus Torvalds #include <net/ip.h>
681da177e4SLinus Torvalds #include <net/protocol.h>
691da177e4SLinus Torvalds #include <net/route.h>
70cfacb057SPatrick McHardy #include <net/xfrm.h>
711da177e4SLinus Torvalds #include <linux/skbuff.h>
721da177e4SLinus Torvalds #include <net/sock.h>
731da177e4SLinus Torvalds #include <net/arp.h>
741da177e4SLinus Torvalds #include <net/icmp.h>
751da177e4SLinus Torvalds #include <net/checksum.h>
76d457a0e3SEric Dumazet #include <net/gso.h>
771da177e4SLinus Torvalds #include <net/inetpeer.h>
78ba9e04a7SWei Wang #include <net/inet_ecn.h>
7914972cbdSRoopa Prabhu #include <net/lwtunnel.h>
8033b48679SDaniel Mack #include <linux/bpf-cgroup.h>
811da177e4SLinus Torvalds #include <linux/igmp.h>
821da177e4SLinus Torvalds #include <linux/netfilter_ipv4.h>
831da177e4SLinus Torvalds #include <linux/netfilter_bridge.h>
841da177e4SLinus Torvalds #include <linux/netlink.h>
856cbb0df7SArnaldo Carvalho de Melo #include <linux/tcp.h>
861da177e4SLinus Torvalds 
87694869b3SEric W. Biederman static int
88694869b3SEric W. Biederman ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
89c5501eb3SFlorian Westphal 	    unsigned int mtu,
90694869b3SEric W. Biederman 	    int (*output)(struct net *, struct sock *, struct sk_buff *));
9149d16b23SAndy Zhou 
921da177e4SLinus Torvalds /* Generate a checksum for an outgoing IP datagram. */
932fbd9679SDenis Efremov void ip_send_check(struct iphdr *iph)
941da177e4SLinus Torvalds {
951da177e4SLinus Torvalds 	iph->check = 0;
961da177e4SLinus Torvalds 	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
971da177e4SLinus Torvalds }
984bc2f18bSEric Dumazet EXPORT_SYMBOL(ip_send_check);
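/*
 * Illustrative sketch (hypothetical helper, not part of this file): any
 * path that modifies an already-built IPv4 header must regenerate the
 * checksum, since iph->check covers every header byte.  A helper that
 * rewrites the TTL would set the field and then call ip_send_check()
 * again:
 *
 *	static void example_rewrite_ttl(struct sk_buff *skb, u8 new_ttl)
 *	{
 *		struct iphdr *iph = ip_hdr(skb);
 *
 *		iph->ttl = new_ttl;
 *		ip_send_check(iph);
 *	}
 *
 * The forwarding fast path avoids the full recomputation by using
 * ip_decrease_ttl(), which adjusts the checksum incrementally.
 */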
991da177e4SLinus Torvalds 
100cf91a99dSEric W. Biederman int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
101c439cb2eSHerbert Xu {
102c439cb2eSHerbert Xu 	struct iphdr *iph = ip_hdr(skb);
103c439cb2eSHerbert Xu 
104b1a78b9bSXin Long 	iph_set_totlen(iph, skb->len);
105c439cb2eSHerbert Xu 	ip_send_check(iph);
106a8e3e1a9SDavid Ahern 
107a8e3e1a9SDavid Ahern 	/* If the egress device is enslaved to an L3 master device, pass
108a8e3e1a9SDavid Ahern 	 * the skb to its handler for processing.
109a8e3e1a9SDavid Ahern 	 */
110a8e3e1a9SDavid Ahern 	skb = l3mdev_ip_out(sk, skb);
111a8e3e1a9SDavid Ahern 	if (unlikely(!skb))
112a8e3e1a9SDavid Ahern 		return 0;
113a8e3e1a9SDavid Ahern 
114f4180439SEli Cooper 	skb->protocol = htons(ETH_P_IP);
115f4180439SEli Cooper 
11629a26a56SEric W. Biederman 	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
11729a26a56SEric W. Biederman 		       net, sk, skb, NULL, skb_dst(skb)->dev,
11813206b6bSEric W. Biederman 		       dst_output);
1197026b1ddSDavid Miller }
1207026b1ddSDavid Miller 
12133224b16SEric W. Biederman int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
122c439cb2eSHerbert Xu {
123c439cb2eSHerbert Xu 	int err;
124c439cb2eSHerbert Xu 
125cf91a99dSEric W. Biederman 	err = __ip_local_out(net, sk, skb);
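	/* A return value of 1 means the NF_INET_LOCAL_OUT hook chain accepted
	 * the packet without stealing or queueing it, so it is still ours to
	 * hand to dst_output().
	 */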
126c439cb2eSHerbert Xu 	if (likely(err == 1))
12713206b6bSEric W. Biederman 		err = dst_output(net, sk, skb);
128c439cb2eSHerbert Xu 
129c439cb2eSHerbert Xu 	return err;
130c439cb2eSHerbert Xu }
131e2cb77dbSEric W. Biederman EXPORT_SYMBOL_GPL(ip_local_out);
132c439cb2eSHerbert Xu 
133abc17a11SEric Dumazet static inline int ip_select_ttl(const struct inet_sock *inet,
134abc17a11SEric Dumazet 				const struct dst_entry *dst)
1351da177e4SLinus Torvalds {
1361da177e4SLinus Torvalds 	int ttl = inet->uc_ttl;
1371da177e4SLinus Torvalds 
1381da177e4SLinus Torvalds 	if (ttl < 0)
139323e126fSDavid S. Miller 		ttl = ip4_dst_hoplimit(dst);
1401da177e4SLinus Torvalds 	return ttl;
1411da177e4SLinus Torvalds }
1421da177e4SLinus Torvalds 
1431da177e4SLinus Torvalds /*
1441da177e4SLinus Torvalds  *		Add an IP header to an skb and send it out.
1451da177e4SLinus Torvalds  *
1461da177e4SLinus Torvalds  */
147cfe673b0SEric Dumazet int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
148de033b7dSWei Wang 			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
149de033b7dSWei Wang 			  u8 tos)
1501da177e4SLinus Torvalds {
151abc17a11SEric Dumazet 	const struct inet_sock *inet = inet_sk(sk);
152511c3f92SEric Dumazet 	struct rtable *rt = skb_rtable(skb);
15377589ce0SEric W. Biederman 	struct net *net = sock_net(sk);
1541da177e4SLinus Torvalds 	struct iphdr *iph;
1551da177e4SLinus Torvalds 
1561da177e4SLinus Torvalds 	/* Build the IP header. */
157f6d8bd05SEric Dumazet 	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
1588856dfa3SArnaldo Carvalho de Melo 	skb_reset_network_header(skb);
159eddc9ec5SArnaldo Carvalho de Melo 	iph = ip_hdr(skb);
1601da177e4SLinus Torvalds 	iph->version  = 4;
1611da177e4SLinus Torvalds 	iph->ihl      = 5;
162de033b7dSWei Wang 	iph->tos      = tos;
163d8d1f30bSChangli Gao 	iph->ttl      = ip_select_ttl(inet, &rt->dst);
164dd927a26SDavid S. Miller 	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
165dd927a26SDavid S. Miller 	iph->saddr    = saddr;
1661da177e4SLinus Torvalds 	iph->protocol = sk->sk_protocol;
167970a5a3eSEric Dumazet 	/* Do not bother generating IPID for small packets (eg SYNACK) */
168970a5a3eSEric Dumazet 	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
169cfe673b0SEric Dumazet 		iph->frag_off = htons(IP_DF);
170cfe673b0SEric Dumazet 		iph->id = 0;
171cfe673b0SEric Dumazet 	} else {
172cfe673b0SEric Dumazet 		iph->frag_off = 0;
173970a5a3eSEric Dumazet 		/* TCP packets here are SYNACK with fat IPv4/TCP options.
174970a5a3eSEric Dumazet 		 * Avoid using the hashed IP ident generator.
175970a5a3eSEric Dumazet 		 */
176970a5a3eSEric Dumazet 		if (sk->sk_protocol == IPPROTO_TCP)
1777e3cf084SJason A. Donenfeld 			iph->id = (__force __be16)get_random_u16();
178970a5a3eSEric Dumazet 		else
17977589ce0SEric W. Biederman 			__ip_select_ident(net, iph, 1);
180cfe673b0SEric Dumazet 	}
1811da177e4SLinus Torvalds 
182f6d8bd05SEric Dumazet 	if (opt && opt->opt.optlen) {
183f6d8bd05SEric Dumazet 		iph->ihl += opt->opt.optlen>>2;
1844f0e3040SJakub Kicinski 		ip_options_build(skb, &opt->opt, daddr, rt);
1851da177e4SLinus Torvalds 	}
1861da177e4SLinus Torvalds 
1871da177e4SLinus Torvalds 	skb->priority = sk->sk_priority;
188e05a90ecSJamal Hadi Salim 	if (!skb->mark)
189*3c5b4d69SEric Dumazet 		skb->mark = READ_ONCE(sk->sk_mark);
1901da177e4SLinus Torvalds 
1911da177e4SLinus Torvalds 	/* Send it out. */
19233224b16SEric W. Biederman 	return ip_local_out(net, skb->sk, skb);
1931da177e4SLinus Torvalds }
194d8c97a94SArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
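/*
 * Illustrative sketch (hypothetical variable names): a caller of
 * ip_build_and_send_pkt() hands over an skb that already carries its route
 * (skb_dst() pointing at the rtable used above) and contains only the
 * transport headers and payload; the IPv4 header is pushed here.  Roughly:
 *
 *	skb_dst_set(skb, &rt->dst);
 *	err = ip_build_and_send_pkt(skb, sk, fl4->saddr, fl4->daddr,
 *				    opt, tos);
 *
 * TCP's SYNACK transmit path is one in-tree user of this helper, which is
 * why the IPID comment above singles out small SYNACK-sized packets.
 */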
195d8c97a94SArnaldo Carvalho de Melo 
196694869b3SEric W. Biederman static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
1971da177e4SLinus Torvalds {
198adf30907SEric Dumazet 	struct dst_entry *dst = skb_dst(skb);
19980787ebcSMitsuru Chinen 	struct rtable *rt = (struct rtable *)dst;
2001da177e4SLinus Torvalds 	struct net_device *dev = dst->dev;
201c2636b4dSChuck Lever 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
202f6b72b62SDavid S. Miller 	struct neighbour *neigh;
2035c9f7c1dSDavid Ahern 	bool is_v6gw = false;
2041da177e4SLinus Torvalds 
205edf391ffSNeil Horman 	if (rt->rt_type == RTN_MULTICAST) {
2064ba1bf42SEric W. Biederman 		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
207edf391ffSNeil Horman 	} else if (rt->rt_type == RTN_BROADCAST)
2084ba1bf42SEric W. Biederman 		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
20980787ebcSMitsuru Chinen 
2103b04dddeSStephen Hemminger 	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
2115678a595SVasily Averin 		skb = skb_expand_head(skb, hh_len);
2125678a595SVasily Averin 		if (!skb)
2131da177e4SLinus Torvalds 			return -ENOMEM;
2141da177e4SLinus Torvalds 	}
2151da177e4SLinus Torvalds 
21614972cbdSRoopa Prabhu 	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
21714972cbdSRoopa Prabhu 		int res = lwtunnel_xmit(skb);
21814972cbdSRoopa Prabhu 
21914972cbdSRoopa Prabhu 		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
22014972cbdSRoopa Prabhu 			return res;
22114972cbdSRoopa Prabhu 	}
22214972cbdSRoopa Prabhu 
22309eed119SEric Dumazet 	rcu_read_lock();
2245c9f7c1dSDavid Ahern 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
2259871f1adSVasiliy Kulikov 	if (!IS_ERR(neigh)) {
2264ff06203SJulian Anastasov 		int res;
2274ff06203SJulian Anastasov 
2284ff06203SJulian Anastasov 		sock_confirm_neigh(skb, neigh);
2305c9f7c1dSDavid Ahern 		/* if crossing protocols, we cannot use the cached header */
2305c9f7c1dSDavid Ahern 		res = neigh_output(neigh, skb, is_v6gw);
23109eed119SEric Dumazet 		rcu_read_unlock();
232f2c31e32SEric Dumazet 		return res;
233f2c31e32SEric Dumazet 	}
23409eed119SEric Dumazet 	rcu_read_unlock();
23505e3aa09SDavid S. Miller 
236e87cc472SJoe Perches 	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
237e87cc472SJoe Perches 			    __func__);
2385e187189SMenglong Dong 	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
2391da177e4SLinus Torvalds 	return -EINVAL;
2401da177e4SLinus Torvalds }
2411da177e4SLinus Torvalds 
242694869b3SEric W. Biederman static int ip_finish_output_gso(struct net *net, struct sock *sk,
243694869b3SEric W. Biederman 				struct sk_buff *skb, unsigned int mtu)
244c7ba65d7SFlorian Westphal {
24588bebdf5SJason A. Donenfeld 	struct sk_buff *segs, *nskb;
246c7ba65d7SFlorian Westphal 	netdev_features_t features;
247c7ba65d7SFlorian Westphal 	int ret = 0;
248c7ba65d7SFlorian Westphal 
2499ee6c5dcSLance Richardson 	/* Common case: the GSO segment length fits within the MTU. */
251779b7931SDaniel Axtens 	if (skb_gso_validate_network_len(skb, mtu))
252694869b3SEric W. Biederman 		return ip_finish_output2(net, sk, skb);
253c7ba65d7SFlorian Westphal 
2540ace81ecSLance Richardson 	/* Slowpath -  GSO segment length exceeds the egress MTU.
255c7ba65d7SFlorian Westphal 	 *
2560ace81ecSLance Richardson 	 * This can happen in several cases:
2570ace81ecSLance Richardson 	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
2580ace81ecSLance Richardson 	 *  - Forwarding of an skb that arrived on a virtualization interface
2590ace81ecSLance Richardson 	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
2600ace81ecSLance Richardson 	 *    stack.
2610ace81ecSLance Richardson 	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
2620ace81ecSLance Richardson 	 *    interface with a smaller MTU.
2630ace81ecSLance Richardson 	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
2640ace81ecSLance Richardson 	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
265a66e04ceSBhaskar Chowdhury 	 *    insufficient MTU.
266c7ba65d7SFlorian Westphal 	 */
267c7ba65d7SFlorian Westphal 	features = netif_skb_features(skb);
268a08e7fd9SCambda Zhu 	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
269c7ba65d7SFlorian Westphal 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
270330966e5SFlorian Westphal 	if (IS_ERR_OR_NULL(segs)) {
271c7ba65d7SFlorian Westphal 		kfree_skb(skb);
272c7ba65d7SFlorian Westphal 		return -ENOMEM;
273c7ba65d7SFlorian Westphal 	}
274c7ba65d7SFlorian Westphal 
275c7ba65d7SFlorian Westphal 	consume_skb(skb);
276c7ba65d7SFlorian Westphal 
27788bebdf5SJason A. Donenfeld 	skb_list_walk_safe(segs, segs, nskb) {
278c7ba65d7SFlorian Westphal 		int err;
279c7ba65d7SFlorian Westphal 
280a8305bffSDavid S. Miller 		skb_mark_not_on_list(segs);
281694869b3SEric W. Biederman 		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
282c7ba65d7SFlorian Westphal 
283c7ba65d7SFlorian Westphal 		if (err && ret == 0)
284c7ba65d7SFlorian Westphal 			ret = err;
28588bebdf5SJason A. Donenfeld 	}
286c7ba65d7SFlorian Westphal 
287c7ba65d7SFlorian Westphal 	return ret;
288c7ba65d7SFlorian Westphal }
289c7ba65d7SFlorian Westphal 
290956fe219Sbrakmo static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2911da177e4SLinus Torvalds {
292c5501eb3SFlorian Westphal 	unsigned int mtu;
293c5501eb3SFlorian Westphal 
2945c901daaSPatrick McHardy #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
2955c901daaSPatrick McHardy 	/* Policy lookup after SNAT yielded a new policy */
29600db4124SIan Morris 	if (skb_dst(skb)->xfrm) {
29748d5cad8SPatrick McHardy 		IPCB(skb)->flags |= IPSKB_REROUTED;
29813206b6bSEric W. Biederman 		return dst_output(net, sk, skb);
29948d5cad8SPatrick McHardy 	}
3005c901daaSPatrick McHardy #endif
301fedbb6b4SShmulik Ladkani 	mtu = ip_skb_dst_mtu(sk, skb);
302c7ba65d7SFlorian Westphal 	if (skb_is_gso(skb))
303694869b3SEric W. Biederman 		return ip_finish_output_gso(net, sk, skb, mtu);
304c7ba65d7SFlorian Westphal 
305bb4cc1a1SFlorian Westphal 	if (skb->len > mtu || IPCB(skb)->frag_max_size)
306694869b3SEric W. Biederman 		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
307c7ba65d7SFlorian Westphal 
308694869b3SEric W. Biederman 	return ip_finish_output2(net, sk, skb);
3091da177e4SLinus Torvalds }
3101da177e4SLinus Torvalds 
311956fe219Sbrakmo static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
312956fe219Sbrakmo {
313956fe219Sbrakmo 	int ret;
314956fe219Sbrakmo 
315956fe219Sbrakmo 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
316956fe219Sbrakmo 	switch (ret) {
317956fe219Sbrakmo 	case NET_XMIT_SUCCESS:
318956fe219Sbrakmo 		return __ip_finish_output(net, sk, skb);
319956fe219Sbrakmo 	case NET_XMIT_CN:
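		/* The egress BPF program signalled congestion: still transmit,
		 * but report NET_XMIT_CN to the caller unless the lower layer
		 * returns a non-zero result of its own (the GNU "?:" keeps a
		 * non-zero __ip_finish_output() value, otherwise falls back
		 * to ret).
		 */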
320956fe219Sbrakmo 		return __ip_finish_output(net, sk, skb) ? : ret;
321956fe219Sbrakmo 	default:
3225e187189SMenglong Dong 		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
323956fe219Sbrakmo 		return ret;
324956fe219Sbrakmo 	}
325956fe219Sbrakmo }
326956fe219Sbrakmo 
32733b48679SDaniel Mack static int ip_mc_finish_output(struct net *net, struct sock *sk,
32833b48679SDaniel Mack 			       struct sk_buff *skb)
32933b48679SDaniel Mack {
3305b18f128SStephen Suryaputra 	struct rtable *new_rt;
331d96ff269SDavid S. Miller 	bool do_cn = false;
332d96ff269SDavid S. Miller 	int ret, err;
33333b48679SDaniel Mack 
33433b48679SDaniel Mack 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
335956fe219Sbrakmo 	switch (ret) {
336956fe219Sbrakmo 	case NET_XMIT_CN:
337d96ff269SDavid S. Miller 		do_cn = true;
338a8eceea8SJoe Perches 		fallthrough;
339d96ff269SDavid S. Miller 	case NET_XMIT_SUCCESS:
340d96ff269SDavid S. Miller 		break;
341956fe219Sbrakmo 	default:
3425e187189SMenglong Dong 		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
34333b48679SDaniel Mack 		return ret;
34433b48679SDaniel Mack 	}
34533b48679SDaniel Mack 
3465b18f128SStephen Suryaputra 	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
3475b18f128SStephen Suryaputra 	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
3485b18f128SStephen Suryaputra 	 * see ipv4_pktinfo_prepare().
3495b18f128SStephen Suryaputra 	 */
3505b18f128SStephen Suryaputra 	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
3515b18f128SStephen Suryaputra 	if (new_rt) {
3525b18f128SStephen Suryaputra 		new_rt->rt_iif = 0;
3535b18f128SStephen Suryaputra 		skb_dst_drop(skb);
3545b18f128SStephen Suryaputra 		skb_dst_set(skb, &new_rt->dst);
3555b18f128SStephen Suryaputra 	}
3565b18f128SStephen Suryaputra 
357d96ff269SDavid S. Miller 	err = dev_loopback_xmit(net, sk, skb);
358d96ff269SDavid S. Miller 	return (do_cn && err) ? ret : err;
35933b48679SDaniel Mack }
36033b48679SDaniel Mack 
361ede2059dSEric W. Biederman int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
3621da177e4SLinus Torvalds {
363511c3f92SEric Dumazet 	struct rtable *rt = skb_rtable(skb);
364d8d1f30bSChangli Gao 	struct net_device *dev = rt->dst.dev;
3651da177e4SLinus Torvalds 
3661da177e4SLinus Torvalds 	/*
3671da177e4SLinus Torvalds 	 *	If the indicated interface is up and running, send the packet.
3681da177e4SLinus Torvalds 	 */
36988f5cc24SEric W. Biederman 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
3701da177e4SLinus Torvalds 
3711da177e4SLinus Torvalds 	skb->dev = dev;
3721da177e4SLinus Torvalds 	skb->protocol = htons(ETH_P_IP);
3731da177e4SLinus Torvalds 
3741da177e4SLinus Torvalds 	/*
3751da177e4SLinus Torvalds 	 *	Multicasts are looped back for other local users
3761da177e4SLinus Torvalds 	 */
3771da177e4SLinus Torvalds 
3781da177e4SLinus Torvalds 	if (rt->rt_flags&RTCF_MULTICAST) {
3797ad6848cSOctavian Purdila 		if (sk_mc_loop(sk)
3801da177e4SLinus Torvalds #ifdef CONFIG_IP_MROUTE
3811da177e4SLinus Torvalds 		/* Small optimization: do not loop back non-local frames,
3821da177e4SLinus Torvalds 		   i.e. those returned after forwarding; they will be dropped
3831da177e4SLinus Torvalds 		   by ip_mr_input in any case.
3841da177e4SLinus Torvalds 		   Note that local frames are looped back so that they are
3851da177e4SLinus Torvalds 		   delivered to local recipients.
3861da177e4SLinus Torvalds 
3871da177e4SLinus Torvalds 		   This check is duplicated in ip_mr_input at the moment.
3881da177e4SLinus Torvalds 		 */
3899d4fb27dSJoe Perches 		    &&
3909d4fb27dSJoe Perches 		    ((rt->rt_flags & RTCF_LOCAL) ||
3919d4fb27dSJoe Perches 		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
3921da177e4SLinus Torvalds #endif
3931da177e4SLinus Torvalds 		   ) {
3941da177e4SLinus Torvalds 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3951da177e4SLinus Torvalds 			if (newskb)
3969bbc768aSJan Engelhardt 				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
39729a26a56SEric W. Biederman 					net, sk, newskb, NULL, newskb->dev,
39833b48679SDaniel Mack 					ip_mc_finish_output);
3991da177e4SLinus Torvalds 		}
4001da177e4SLinus Torvalds 
4011da177e4SLinus Torvalds 		/* Multicasts with ttl 0 must not go beyond the host */
4021da177e4SLinus Torvalds 
403eddc9ec5SArnaldo Carvalho de Melo 		if (ip_hdr(skb)->ttl == 0) {
4041da177e4SLinus Torvalds 			kfree_skb(skb);
4051da177e4SLinus Torvalds 			return 0;
4061da177e4SLinus Torvalds 		}
4071da177e4SLinus Torvalds 	}
4081da177e4SLinus Torvalds 
4091da177e4SLinus Torvalds 	if (rt->rt_flags&RTCF_BROADCAST) {
4101da177e4SLinus Torvalds 		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
4111da177e4SLinus Torvalds 		if (newskb)
41229a26a56SEric W. Biederman 			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
41329a26a56SEric W. Biederman 				net, sk, newskb, NULL, newskb->dev,
41433b48679SDaniel Mack 				ip_mc_finish_output);
4151da177e4SLinus Torvalds 	}
4161da177e4SLinus Torvalds 
41729a26a56SEric W. Biederman 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
41829a26a56SEric W. Biederman 			    net, sk, skb, NULL, skb->dev,
41929a26a56SEric W. Biederman 			    ip_finish_output,
42048d5cad8SPatrick McHardy 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
4211da177e4SLinus Torvalds }
4221da177e4SLinus Torvalds 
423ede2059dSEric W. Biederman int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
4241da177e4SLinus Torvalds {
42528f8bfd1SPhil Sutter 	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
4261bd9bef6SPatrick McHardy 
42788f5cc24SEric W. Biederman 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
4281da177e4SLinus Torvalds 
4291bd9bef6SPatrick McHardy 	skb->dev = dev;
4301bd9bef6SPatrick McHardy 	skb->protocol = htons(ETH_P_IP);
4311bd9bef6SPatrick McHardy 
43229a26a56SEric W. Biederman 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
43328f8bfd1SPhil Sutter 			    net, sk, skb, indev, dev,
43448d5cad8SPatrick McHardy 			    ip_finish_output,
43548d5cad8SPatrick McHardy 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
4361da177e4SLinus Torvalds }
4376585d7dcSBrian Vazquez EXPORT_SYMBOL(ip_output);
4381da177e4SLinus Torvalds 
43984f9307cSEric Dumazet /*
44084f9307cSEric Dumazet  * Copy saddr and daddr, possibly using 64-bit loads/stores.
44184f9307cSEric Dumazet  * Equivalent to:
44284f9307cSEric Dumazet  *   iph->saddr = fl4->saddr;
44384f9307cSEric Dumazet  *   iph->daddr = fl4->daddr;
44484f9307cSEric Dumazet  */
44584f9307cSEric Dumazet static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
44684f9307cSEric Dumazet {
44784f9307cSEric Dumazet 	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
44884f9307cSEric Dumazet 		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
4496321c7acSGustavo A. R. Silva 
4506321c7acSGustavo A. R. Silva 	iph->saddr = fl4->saddr;
4516321c7acSGustavo A. R. Silva 	iph->daddr = fl4->daddr;
45284f9307cSEric Dumazet }
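/*
 * Illustrative note: daddr immediately follows saddr both in struct flowi4
 * (enforced by the BUILD_BUG_ON above) and in the on-wire struct iphdr, so
 * the pair of assignments is equivalent to the single 8-byte copy below,
 * which the compiler is free to emit as one 64-bit store:
 *
 *	memcpy(&iph->saddr, &fl4->saddr, 2 * sizeof(__be32));
 */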
45384f9307cSEric Dumazet 
454b0270e91SEric Dumazet /* Note: skb->sk can be different from sk, in case of tunnels */
45569b9e1e0SXin Long int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
45669b9e1e0SXin Long 		    __u8 tos)
4571da177e4SLinus Torvalds {
4581da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
45977589ce0SEric W. Biederman 	struct net *net = sock_net(sk);
460f6d8bd05SEric Dumazet 	struct ip_options_rcu *inet_opt;
461b57ae01aSDavid S. Miller 	struct flowi4 *fl4;
4621da177e4SLinus Torvalds 	struct rtable *rt;
4631da177e4SLinus Torvalds 	struct iphdr *iph;
464ab6e3febSEric Dumazet 	int res;
4651da177e4SLinus Torvalds 
4661da177e4SLinus Torvalds 	/* Skip all of this if the packet is already routed,
4671da177e4SLinus Torvalds 	 * f.e. by something like SCTP.
4671da177e4SLinus Torvalds 	 * e.g. by something like SCTP.
469ab6e3febSEric Dumazet 	rcu_read_lock();
470f6d8bd05SEric Dumazet 	inet_opt = rcu_dereference(inet->inet_opt);
471ea4fc0d6SDavid S. Miller 	fl4 = &fl->u.ip4;
472511c3f92SEric Dumazet 	rt = skb_rtable(skb);
47300db4124SIan Morris 	if (rt)
4741da177e4SLinus Torvalds 		goto packet_routed;
4751da177e4SLinus Torvalds 
4761da177e4SLinus Torvalds 	/* Make sure we can route this packet. */
4771da177e4SLinus Torvalds 	rt = (struct rtable *)__sk_dst_check(sk, 0);
47851456b29SIan Morris 	if (!rt) {
4793ca3c68eSAl Viro 		__be32 daddr;
4801da177e4SLinus Torvalds 
4811da177e4SLinus Torvalds 		/* Use correct destination address if we have options. */
482c720c7e8SEric Dumazet 		daddr = inet->inet_daddr;
483f6d8bd05SEric Dumazet 		if (inet_opt && inet_opt->opt.srr)
484f6d8bd05SEric Dumazet 			daddr = inet_opt->opt.faddr;
4851da177e4SLinus Torvalds 
4861da177e4SLinus Torvalds 		/* If this fails, the retransmit mechanism of the transport
4871da177e4SLinus Torvalds 		 * layer will keep trying until a route appears or the
4881da177e4SLinus Torvalds 		 * connection times out on its own.
4891da177e4SLinus Torvalds 		 */
49077589ce0SEric W. Biederman 		rt = ip_route_output_ports(net, fl4, sk,
49178fbfd8aSDavid S. Miller 					   daddr, inet->inet_saddr,
49278fbfd8aSDavid S. Miller 					   inet->inet_dport,
49378fbfd8aSDavid S. Miller 					   inet->inet_sport,
49478fbfd8aSDavid S. Miller 					   sk->sk_protocol,
49569b9e1e0SXin Long 					   RT_CONN_FLAGS_TOS(sk, tos),
49678fbfd8aSDavid S. Miller 					   sk->sk_bound_dev_if);
497b23dd4feSDavid S. Miller 		if (IS_ERR(rt))
4981da177e4SLinus Torvalds 			goto no_route;
499d8d1f30bSChangli Gao 		sk_setup_caps(sk, &rt->dst);
5001da177e4SLinus Torvalds 	}
501d8d1f30bSChangli Gao 	skb_dst_set_noref(skb, &rt->dst);
5021da177e4SLinus Torvalds 
5031da177e4SLinus Torvalds packet_routed:
50477d5bc7eSDavid Ahern 	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
5051da177e4SLinus Torvalds 		goto no_route;
5061da177e4SLinus Torvalds 
5071da177e4SLinus Torvalds 	/* OK, we know where to send it, allocate and build IP header. */
508f6d8bd05SEric Dumazet 	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
5098856dfa3SArnaldo Carvalho de Melo 	skb_reset_network_header(skb);
510eddc9ec5SArnaldo Carvalho de Melo 	iph = ip_hdr(skb);
51169b9e1e0SXin Long 	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
51260ff7467SWANG Cong 	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
5131da177e4SLinus Torvalds 		iph->frag_off = htons(IP_DF);
5141da177e4SLinus Torvalds 	else
5151da177e4SLinus Torvalds 		iph->frag_off = 0;
516d8d1f30bSChangli Gao 	iph->ttl      = ip_select_ttl(inet, &rt->dst);
5171da177e4SLinus Torvalds 	iph->protocol = sk->sk_protocol;
51884f9307cSEric Dumazet 	ip_copy_addrs(iph, fl4);
51984f9307cSEric Dumazet 
5201da177e4SLinus Torvalds 	/* The transport layer has already set its own header offsets. */
5211da177e4SLinus Torvalds 
522f6d8bd05SEric Dumazet 	if (inet_opt && inet_opt->opt.optlen) {
523f6d8bd05SEric Dumazet 		iph->ihl += inet_opt->opt.optlen >> 2;
5244f0e3040SJakub Kicinski 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
5251da177e4SLinus Torvalds 	}
5261da177e4SLinus Torvalds 
52777589ce0SEric W. Biederman 	ip_select_ident_segs(net, skb, sk,
528b6a7719aSHannes Frederic Sowa 			     skb_shinfo(skb)->gso_segs ?: 1);
5291da177e4SLinus Torvalds 
530b0270e91SEric Dumazet 	/* TODO : should we use skb->sk here instead of sk ? */
5311da177e4SLinus Torvalds 	skb->priority = sk->sk_priority;
532*3c5b4d69SEric Dumazet 	skb->mark = READ_ONCE(sk->sk_mark);
5331da177e4SLinus Torvalds 
53433224b16SEric W. Biederman 	res = ip_local_out(net, sk, skb);
535ab6e3febSEric Dumazet 	rcu_read_unlock();
536ab6e3febSEric Dumazet 	return res;
5371da177e4SLinus Torvalds 
5381da177e4SLinus Torvalds no_route:
539ab6e3febSEric Dumazet 	rcu_read_unlock();
54077589ce0SEric W. Biederman 	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
5415e187189SMenglong Dong 	kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
5421da177e4SLinus Torvalds 	return -EHOSTUNREACH;
5431da177e4SLinus Torvalds }
54469b9e1e0SXin Long EXPORT_SYMBOL(__ip_queue_xmit);
5451da177e4SLinus Torvalds 
54605e22e83SEric Dumazet int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
54705e22e83SEric Dumazet {
54805e22e83SEric Dumazet 	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
54905e22e83SEric Dumazet }
55005e22e83SEric Dumazet EXPORT_SYMBOL(ip_queue_xmit);
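/*
 * Illustrative sketch: a connected transport typically calls this once per
 * skb, passing the flow information cached in the socket's cork, e.g.
 *
 *	err = ip_queue_xmit(sk, skb, &inet_sk(sk)->cork.fl);
 *
 * which is, modulo the indirect call through the af_ops vector, roughly
 * what TCP does when transmitting a segment.
 */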
55105e22e83SEric Dumazet 
5521da177e4SLinus Torvalds static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
5531da177e4SLinus Torvalds {
5541da177e4SLinus Torvalds 	to->pkt_type = from->pkt_type;
5551da177e4SLinus Torvalds 	to->priority = from->priority;
5561da177e4SLinus Torvalds 	to->protocol = from->protocol;
557d2f0c961SShmulik Ladkani 	to->skb_iif = from->skb_iif;
558adf30907SEric Dumazet 	skb_dst_drop(to);
559fe76cda3SEric Dumazet 	skb_dst_copy(to, from);
5601da177e4SLinus Torvalds 	to->dev = from->dev;
56182e91ffeSThomas Graf 	to->mark = from->mark;
5621da177e4SLinus Torvalds 
5633dd1c9a1SPaolo Abeni 	skb_copy_hash(to, from);
5643dd1c9a1SPaolo Abeni 
5651da177e4SLinus Torvalds #ifdef CONFIG_NET_SCHED
5661da177e4SLinus Torvalds 	to->tc_index = from->tc_index;
5671da177e4SLinus Torvalds #endif
568e7ac05f3SYasuyuki Kozakai 	nf_copy(to, from);
569df5042f4SFlorian Westphal 	skb_ext_copy(to, from);
5706ca40d4eSJavier Martinez Canillas #if IS_ENABLED(CONFIG_IP_VS)
571c98d80edSJulian Anastasov 	to->ipvs_property = from->ipvs_property;
572c98d80edSJulian Anastasov #endif
573984bc16cSJames Morris 	skb_copy_secmark(to, from);
5741da177e4SLinus Torvalds }
5751da177e4SLinus Torvalds 
576694869b3SEric W. Biederman static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
577c5501eb3SFlorian Westphal 		       unsigned int mtu,
578694869b3SEric W. Biederman 		       int (*output)(struct net *, struct sock *, struct sk_buff *))
57949d16b23SAndy Zhou {
58049d16b23SAndy Zhou 	struct iphdr *iph = ip_hdr(skb);
58149d16b23SAndy Zhou 
582d6b915e2SFlorian Westphal 	if ((iph->frag_off & htons(IP_DF)) == 0)
583694869b3SEric W. Biederman 		return ip_do_fragment(net, sk, skb, output);
584d6b915e2SFlorian Westphal 
585d6b915e2SFlorian Westphal 	if (unlikely(!skb->ignore_df ||
58649d16b23SAndy Zhou 		     (IPCB(skb)->frag_max_size &&
58749d16b23SAndy Zhou 		      IPCB(skb)->frag_max_size > mtu))) {
5889479b0afSEric W. Biederman 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
58949d16b23SAndy Zhou 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
59049d16b23SAndy Zhou 			  htonl(mtu));
59149d16b23SAndy Zhou 		kfree_skb(skb);
59249d16b23SAndy Zhou 		return -EMSGSIZE;
59349d16b23SAndy Zhou 	}
59449d16b23SAndy Zhou 
595694869b3SEric W. Biederman 	return ip_do_fragment(net, sk, skb, output);
59649d16b23SAndy Zhou }
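/*
 * Worked example (assuming skb->ignore_df is not set, the normal case for
 * forwarded traffic): a 1500-byte datagram with DF set that has to leave a
 * route whose MTU is 1400 takes the error branch above: the stack sends
 * ICMP_DEST_UNREACH/ICMP_FRAG_NEEDED carrying htonl(1400) back towards the
 * source (feeding its Path MTU discovery) and drops the skb with -EMSGSIZE.
 */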
59749d16b23SAndy Zhou 
598c8b17be0SPablo Neira Ayuso void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
599c8b17be0SPablo Neira Ayuso 		      unsigned int hlen, struct ip_fraglist_iter *iter)
600c8b17be0SPablo Neira Ayuso {
601c8b17be0SPablo Neira Ayuso 	unsigned int first_len = skb_pagelen(skb);
602c8b17be0SPablo Neira Ayuso 
603b7034146SEric Dumazet 	iter->frag = skb_shinfo(skb)->frag_list;
604c8b17be0SPablo Neira Ayuso 	skb_frag_list_init(skb);
605c8b17be0SPablo Neira Ayuso 
606c8b17be0SPablo Neira Ayuso 	iter->offset = 0;
607c8b17be0SPablo Neira Ayuso 	iter->iph = iph;
608c8b17be0SPablo Neira Ayuso 	iter->hlen = hlen;
609c8b17be0SPablo Neira Ayuso 
610c8b17be0SPablo Neira Ayuso 	skb->data_len = first_len - skb_headlen(skb);
611c8b17be0SPablo Neira Ayuso 	skb->len = first_len;
612c8b17be0SPablo Neira Ayuso 	iph->tot_len = htons(first_len);
613c8b17be0SPablo Neira Ayuso 	iph->frag_off = htons(IP_MF);
614c8b17be0SPablo Neira Ayuso 	ip_send_check(iph);
615c8b17be0SPablo Neira Ayuso }
616c8b17be0SPablo Neira Ayuso EXPORT_SYMBOL(ip_fraglist_init);
617c8b17be0SPablo Neira Ayuso 
618c8b17be0SPablo Neira Ayuso void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
619c8b17be0SPablo Neira Ayuso {
620c8b17be0SPablo Neira Ayuso 	unsigned int hlen = iter->hlen;
621c8b17be0SPablo Neira Ayuso 	struct iphdr *iph = iter->iph;
622c8b17be0SPablo Neira Ayuso 	struct sk_buff *frag;
623c8b17be0SPablo Neira Ayuso 
624c8b17be0SPablo Neira Ayuso 	frag = iter->frag;
625c8b17be0SPablo Neira Ayuso 	frag->ip_summed = CHECKSUM_NONE;
626c8b17be0SPablo Neira Ayuso 	skb_reset_transport_header(frag);
627c8b17be0SPablo Neira Ayuso 	__skb_push(frag, hlen);
628c8b17be0SPablo Neira Ayuso 	skb_reset_network_header(frag);
629c8b17be0SPablo Neira Ayuso 	memcpy(skb_network_header(frag), iph, hlen);
630c8b17be0SPablo Neira Ayuso 	iter->iph = ip_hdr(frag);
631c8b17be0SPablo Neira Ayuso 	iph = iter->iph;
632c8b17be0SPablo Neira Ayuso 	iph->tot_len = htons(frag->len);
633c8b17be0SPablo Neira Ayuso 	ip_copy_metadata(frag, skb);
634c8b17be0SPablo Neira Ayuso 	iter->offset += skb->len - hlen;
635c8b17be0SPablo Neira Ayuso 	iph->frag_off = htons(iter->offset >> 3);
636c8b17be0SPablo Neira Ayuso 	if (frag->next)
637c8b17be0SPablo Neira Ayuso 		iph->frag_off |= htons(IP_MF);
638c8b17be0SPablo Neira Ayuso 	/* Ready, complete checksum */
639c8b17be0SPablo Neira Ayuso 	ip_send_check(iph);
640c8b17be0SPablo Neira Ayuso }
641c8b17be0SPablo Neira Ayuso EXPORT_SYMBOL(ip_fraglist_prepare);
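/*
 * Illustrative sketch of how the fraglist iterator above is meant to be
 * driven (this mirrors the fast path of ip_do_fragment() below; error
 * handling, IPCB fixups and statistics omitted):
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */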
642c8b17be0SPablo Neira Ayuso 
643065ff79fSPablo Neira Ayuso void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
644e7a409c3SEric Dumazet 		  unsigned int ll_rs, unsigned int mtu, bool DF,
645065ff79fSPablo Neira Ayuso 		  struct ip_frag_state *state)
646065ff79fSPablo Neira Ayuso {
647065ff79fSPablo Neira Ayuso 	struct iphdr *iph = ip_hdr(skb);
648065ff79fSPablo Neira Ayuso 
649e7a409c3SEric Dumazet 	state->DF = DF;
650065ff79fSPablo Neira Ayuso 	state->hlen = hlen;
651065ff79fSPablo Neira Ayuso 	state->ll_rs = ll_rs;
652065ff79fSPablo Neira Ayuso 	state->mtu = mtu;
653065ff79fSPablo Neira Ayuso 
654065ff79fSPablo Neira Ayuso 	state->left = skb->len - hlen;	/* Space per frame */
655065ff79fSPablo Neira Ayuso 	state->ptr = hlen;		/* Where to start from */
656065ff79fSPablo Neira Ayuso 
657065ff79fSPablo Neira Ayuso 	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
658065ff79fSPablo Neira Ayuso 	state->not_last_frag = iph->frag_off & htons(IP_MF);
659065ff79fSPablo Neira Ayuso }
660065ff79fSPablo Neira Ayuso EXPORT_SYMBOL(ip_frag_init);
661065ff79fSPablo Neira Ayuso 
66219c3401aSPablo Neira Ayuso static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
663faf482caSYajun Deng 			 bool first_frag)
66419c3401aSPablo Neira Ayuso {
66519c3401aSPablo Neira Ayuso 	/* Copy the flags to each fragment. */
66619c3401aSPablo Neira Ayuso 	IPCB(to)->flags = IPCB(from)->flags;
66719c3401aSPablo Neira Ayuso 
66819c3401aSPablo Neira Ayuso 	/* ANK: a dirty but effective trick. Upgrade the options only if
66919c3401aSPablo Neira Ayuso 	 * the segment to be fragmented was THE FIRST (otherwise the
67019c3401aSPablo Neira Ayuso 	 * options are already fixed), and do it ONCE
67119c3401aSPablo Neira Ayuso 	 * on the initial skb, so that all the following fragments
67219c3401aSPablo Neira Ayuso 	 * inherit the fixed options.
67319c3401aSPablo Neira Ayuso 	 */
67419c3401aSPablo Neira Ayuso 	if (first_frag)
67519c3401aSPablo Neira Ayuso 		ip_options_fragment(from);
67619c3401aSPablo Neira Ayuso }
67719c3401aSPablo Neira Ayuso 
678065ff79fSPablo Neira Ayuso struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
679065ff79fSPablo Neira Ayuso {
680065ff79fSPablo Neira Ayuso 	unsigned int len = state->left;
681065ff79fSPablo Neira Ayuso 	struct sk_buff *skb2;
682065ff79fSPablo Neira Ayuso 	struct iphdr *iph;
683065ff79fSPablo Neira Ayuso 
684065ff79fSPablo Neira Ayuso 	/* IF: it doesn't fit, use 'mtu' (the data space left) */
685065ff79fSPablo Neira Ayuso 	if (len > state->mtu)
686065ff79fSPablo Neira Ayuso 		len = state->mtu;
687065ff79fSPablo Neira Ayuso 	/* IF: we are not sending up to and including the end of the packet,
688065ff79fSPablo Neira Ayuso 	   then align the next fragment start on an eight-byte boundary */
689065ff79fSPablo Neira Ayuso 	if (len < state->left)	{
690065ff79fSPablo Neira Ayuso 		len &= ~7;
691065ff79fSPablo Neira Ayuso 	}
692065ff79fSPablo Neira Ayuso 
693065ff79fSPablo Neira Ayuso 	/* Allocate buffer */
694065ff79fSPablo Neira Ayuso 	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
695065ff79fSPablo Neira Ayuso 	if (!skb2)
696065ff79fSPablo Neira Ayuso 		return ERR_PTR(-ENOMEM);
697065ff79fSPablo Neira Ayuso 
698065ff79fSPablo Neira Ayuso 	/*
699065ff79fSPablo Neira Ayuso 	 *	Set up data on packet
700065ff79fSPablo Neira Ayuso 	 */
701065ff79fSPablo Neira Ayuso 
702065ff79fSPablo Neira Ayuso 	ip_copy_metadata(skb2, skb);
703065ff79fSPablo Neira Ayuso 	skb_reserve(skb2, state->ll_rs);
704065ff79fSPablo Neira Ayuso 	skb_put(skb2, len + state->hlen);
705065ff79fSPablo Neira Ayuso 	skb_reset_network_header(skb2);
706065ff79fSPablo Neira Ayuso 	skb2->transport_header = skb2->network_header + state->hlen;
707065ff79fSPablo Neira Ayuso 
708065ff79fSPablo Neira Ayuso 	/*
709065ff79fSPablo Neira Ayuso 	 *	Charge the memory for the fragment to any owner
710065ff79fSPablo Neira Ayuso 	 *	it might possess
711065ff79fSPablo Neira Ayuso 	 */
712065ff79fSPablo Neira Ayuso 
713065ff79fSPablo Neira Ayuso 	if (skb->sk)
714065ff79fSPablo Neira Ayuso 		skb_set_owner_w(skb2, skb->sk);
715065ff79fSPablo Neira Ayuso 
716065ff79fSPablo Neira Ayuso 	/*
717065ff79fSPablo Neira Ayuso 	 *	Copy the packet header into the new buffer.
718065ff79fSPablo Neira Ayuso 	 */
719065ff79fSPablo Neira Ayuso 
720065ff79fSPablo Neira Ayuso 	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
721065ff79fSPablo Neira Ayuso 
722065ff79fSPablo Neira Ayuso 	/*
723065ff79fSPablo Neira Ayuso 	 *	Copy a block of the IP datagram.
724065ff79fSPablo Neira Ayuso 	 */
725065ff79fSPablo Neira Ayuso 	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
726065ff79fSPablo Neira Ayuso 		BUG();
727065ff79fSPablo Neira Ayuso 	state->left -= len;
728065ff79fSPablo Neira Ayuso 
729065ff79fSPablo Neira Ayuso 	/*
730065ff79fSPablo Neira Ayuso 	 *	Fill in the new header fields.
731065ff79fSPablo Neira Ayuso 	 */
732065ff79fSPablo Neira Ayuso 	iph = ip_hdr(skb2);
733065ff79fSPablo Neira Ayuso 	iph->frag_off = htons((state->offset >> 3));
734e7a409c3SEric Dumazet 	if (state->DF)
735e7a409c3SEric Dumazet 		iph->frag_off |= htons(IP_DF);
736065ff79fSPablo Neira Ayuso 
737065ff79fSPablo Neira Ayuso 	/*
738065ff79fSPablo Neira Ayuso 	 *	Added AC: If we are fragmenting a fragment that is not the
739065ff79fSPablo Neira Ayuso 	 *		   last fragment, then keep the MF bit set on each fragment
740065ff79fSPablo Neira Ayuso 	 */
741065ff79fSPablo Neira Ayuso 	if (state->left > 0 || state->not_last_frag)
742065ff79fSPablo Neira Ayuso 		iph->frag_off |= htons(IP_MF);
743065ff79fSPablo Neira Ayuso 	state->ptr += len;
744065ff79fSPablo Neira Ayuso 	state->offset += len;
745065ff79fSPablo Neira Ayuso 
746065ff79fSPablo Neira Ayuso 	iph->tot_len = htons(len + state->hlen);
747065ff79fSPablo Neira Ayuso 
748065ff79fSPablo Neira Ayuso 	ip_send_check(iph);
749065ff79fSPablo Neira Ayuso 
750065ff79fSPablo Neira Ayuso 	return skb2;
751065ff79fSPablo Neira Ayuso }
752065ff79fSPablo Neira Ayuso EXPORT_SYMBOL(ip_frag_next);
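/*
 * Illustrative sketch of the slow-path iterator above (condensed from the
 * tail of ip_do_fragment() below; per-fragment IPCB fixups and statistics
 * omitted):
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);
 *	while (state.left > 0) {
 *		skb2 = ip_frag_next(skb, &state);
 *		if (IS_ERR(skb2))
 *			return PTR_ERR(skb2);
 *		err = output(net, sk, skb2);
 *		if (err)
 *			return err;
 *	}
 *	consume_skb(skb);
 */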
753065ff79fSPablo Neira Ayuso 
7541da177e4SLinus Torvalds /*
7551da177e4SLinus Torvalds  *	This IP datagram is too large to be sent in one piece.  Break it up into
7561da177e4SLinus Torvalds  *	smaller pieces (each of a size equal to the IP header plus
7571da177e4SLinus Torvalds  *	a block of the original IP data) that will still fit in a
7581da177e4SLinus Torvalds  *	single device frame, and queue each such frame for sending.
7591da177e4SLinus Torvalds  */
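/*
 * Worked example (assuming a 1500-byte device MTU and a 20-byte IP header,
 * i.e. 1480 bytes of data space per fragment): a UDP datagram carrying
 * 4008 bytes of L4 data is split into three fragments of 1480, 1480 and
 * 1048 data bytes.  Their frag_off fields encode byte offsets 0, 1480 and
 * 2960 (stored as 0, 185 and 370 in 8-byte units), the first two have MF
 * set, and each fragment gets its own copy of the 20-byte IP header, so
 * 1500 + 1500 + 1068 bytes go on the wire.
 */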
7601da177e4SLinus Torvalds 
761694869b3SEric W. Biederman int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
762694869b3SEric W. Biederman 		   int (*output)(struct net *, struct sock *, struct sk_buff *))
7631da177e4SLinus Torvalds {
7641da177e4SLinus Torvalds 	struct iphdr *iph;
7651da177e4SLinus Torvalds 	struct sk_buff *skb2;
766a1ac9c8aSMartin KaFai Lau 	bool mono_delivery_time = skb->mono_delivery_time;
767511c3f92SEric Dumazet 	struct rtable *rt = skb_rtable(skb);
768065ff79fSPablo Neira Ayuso 	unsigned int mtu, hlen, ll_rs;
769c8b17be0SPablo Neira Ayuso 	struct ip_fraglist_iter iter;
7709669fffcSEric Dumazet 	ktime_t tstamp = skb->tstamp;
771065ff79fSPablo Neira Ayuso 	struct ip_frag_state state;
7721da177e4SLinus Torvalds 	int err = 0;
7731da177e4SLinus Torvalds 
774dbd3393cSHannes Frederic Sowa 	/* For offloaded checksums, complete the checksum in software before fragmenting. */
775dbd3393cSHannes Frederic Sowa 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
776dbd3393cSHannes Frederic Sowa 	    (err = skb_checksum_help(skb)))
777dbd3393cSHannes Frederic Sowa 		goto fail;
778dbd3393cSHannes Frederic Sowa 
7791da177e4SLinus Torvalds 	/*
7801da177e4SLinus Torvalds 	 *	Point into the IP datagram header.
7811da177e4SLinus Torvalds 	 */
7821da177e4SLinus Torvalds 
783eddc9ec5SArnaldo Carvalho de Melo 	iph = ip_hdr(skb);
7841da177e4SLinus Torvalds 
785fedbb6b4SShmulik Ladkani 	mtu = ip_skb_dst_mtu(sk, skb);
786d6b915e2SFlorian Westphal 	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
787d6b915e2SFlorian Westphal 		mtu = IPCB(skb)->frag_max_size;
7881da177e4SLinus Torvalds 
7891da177e4SLinus Torvalds 	/*
7901da177e4SLinus Torvalds 	 *	Setup starting values.
7911da177e4SLinus Torvalds 	 */
7921da177e4SLinus Torvalds 
7931da177e4SLinus Torvalds 	hlen = iph->ihl * 4;
794f87c10a8SHannes Frederic Sowa 	mtu = mtu - hlen;	/* Size of data space */
79589cee8b1SHerbert Xu 	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
796254d900bSVasily Averin 	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
7971da177e4SLinus Torvalds 
7981da177e4SLinus Torvalds 	/* When frag_list is given, use it. First, check its validity:
7991da177e4SLinus Torvalds 	 * some transformers could create a wrong frag_list or break an
8001da177e4SLinus Torvalds 	 * existing one; this is not prohibited. In that case fall back to copying.
8011da177e4SLinus Torvalds 	 *
8021da177e4SLinus Torvalds 	 * LATER: this step can be merged into the real generation of fragments;
8031da177e4SLinus Torvalds 	 * we can switch to copying when we see the first bad fragment.
8041da177e4SLinus Torvalds 	 */
80521dc3301SDavid S. Miller 	if (skb_has_frag_list(skb)) {
8063d13008eSEric Dumazet 		struct sk_buff *frag, *frag2;
807c72d8cdaSAlexey Dobriyan 		unsigned int first_len = skb_pagelen(skb);
8081da177e4SLinus Torvalds 
8091da177e4SLinus Torvalds 		if (first_len - hlen > mtu ||
8101da177e4SLinus Torvalds 		    ((first_len - hlen) & 7) ||
81156f8a75cSPaul Gortmaker 		    ip_is_fragment(iph) ||
812254d900bSVasily Averin 		    skb_cloned(skb) ||
813254d900bSVasily Averin 		    skb_headroom(skb) < ll_rs)
8141da177e4SLinus Torvalds 			goto slow_path;
8151da177e4SLinus Torvalds 
816d7fcf1a5SDavid S. Miller 		skb_walk_frags(skb, frag) {
8171da177e4SLinus Torvalds 			/* Correct geometry. */
8181da177e4SLinus Torvalds 			if (frag->len > mtu ||
8191da177e4SLinus Torvalds 			    ((frag->len & 7) && frag->next) ||
820254d900bSVasily Averin 			    skb_headroom(frag) < hlen + ll_rs)
8213d13008eSEric Dumazet 				goto slow_path_clean;
8221da177e4SLinus Torvalds 
8231da177e4SLinus Torvalds 			/* Partially cloned skb? */
8241da177e4SLinus Torvalds 			if (skb_shared(frag))
8253d13008eSEric Dumazet 				goto slow_path_clean;
8262fdba6b0SHerbert Xu 
8272fdba6b0SHerbert Xu 			BUG_ON(frag->sk);
8282fdba6b0SHerbert Xu 			if (skb->sk) {
8292fdba6b0SHerbert Xu 				frag->sk = skb->sk;
8302fdba6b0SHerbert Xu 				frag->destructor = sock_wfree;
8312fdba6b0SHerbert Xu 			}
8323d13008eSEric Dumazet 			skb->truesize -= frag->truesize;
8331da177e4SLinus Torvalds 		}
8341da177e4SLinus Torvalds 
8351da177e4SLinus Torvalds 		/* Everything is OK. Generate! */
836c8b17be0SPablo Neira Ayuso 		ip_fraglist_init(skb, iph, hlen, &iter);
8371b9fbe81SYajun Deng 
8381da177e4SLinus Torvalds 		for (;;) {
8391da177e4SLinus Torvalds 			/* Prepare the header of the next frame
8401da177e4SLinus Torvalds 			 * before the previous one goes down. */
84119c3401aSPablo Neira Ayuso 			if (iter.frag) {
84227a8caa5SJakub Kicinski 				bool first_frag = (iter.offset == 0);
84327a8caa5SJakub Kicinski 
844faf482caSYajun Deng 				IPCB(iter.frag)->flags = IPCB(skb)->flags;
845c8b17be0SPablo Neira Ayuso 				ip_fraglist_prepare(skb, &iter);
84627a8caa5SJakub Kicinski 				if (first_frag && IPCB(skb)->opt.optlen) {
84727a8caa5SJakub Kicinski 					/* ipcb->opt is not populated for frags
84827a8caa5SJakub Kicinski 					 * coming from __ip_make_skb(),
84927a8caa5SJakub Kicinski 					 * ip_options_fragment() needs optlen
85027a8caa5SJakub Kicinski 					 */
85127a8caa5SJakub Kicinski 					IPCB(iter.frag)->opt.optlen =
85227a8caa5SJakub Kicinski 						IPCB(skb)->opt.optlen;
85327a8caa5SJakub Kicinski 					ip_options_fragment(iter.frag);
85427a8caa5SJakub Kicinski 					ip_send_check(iter.iph);
85527a8caa5SJakub Kicinski 				}
85619c3401aSPablo Neira Ayuso 			}
8571da177e4SLinus Torvalds 
858a1ac9c8aSMartin KaFai Lau 			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
859694869b3SEric W. Biederman 			err = output(net, sk, skb);
8601da177e4SLinus Torvalds 
861dafee490SWei Dong 			if (!err)
86226a949dbSEric W. Biederman 				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
863c8b17be0SPablo Neira Ayuso 			if (err || !iter.frag)
8641da177e4SLinus Torvalds 				break;
8651da177e4SLinus Torvalds 
866c8b17be0SPablo Neira Ayuso 			skb = ip_fraglist_next(&iter);
8671da177e4SLinus Torvalds 		}
8681da177e4SLinus Torvalds 
8691da177e4SLinus Torvalds 		if (err == 0) {
87026a949dbSEric W. Biederman 			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
8711da177e4SLinus Torvalds 			return 0;
8721da177e4SLinus Torvalds 		}
8731da177e4SLinus Torvalds 
874b7034146SEric Dumazet 		kfree_skb_list(iter.frag);
875942f146aSPablo Neira Ayuso 
87626a949dbSEric W. Biederman 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
8771da177e4SLinus Torvalds 		return err;
8783d13008eSEric Dumazet 
8793d13008eSEric Dumazet slow_path_clean:
8803d13008eSEric Dumazet 		skb_walk_frags(skb, frag2) {
8813d13008eSEric Dumazet 			if (frag2 == frag)
8823d13008eSEric Dumazet 				break;
8833d13008eSEric Dumazet 			frag2->sk = NULL;
8843d13008eSEric Dumazet 			frag2->destructor = NULL;
8853d13008eSEric Dumazet 			skb->truesize += frag2->truesize;
8863d13008eSEric Dumazet 		}
8871da177e4SLinus Torvalds 	}
8881da177e4SLinus Torvalds 
8891da177e4SLinus Torvalds slow_path:
8901da177e4SLinus Torvalds 	/*
8911da177e4SLinus Torvalds 	 *	Fragment the datagram.
8921da177e4SLinus Torvalds 	 */
8931da177e4SLinus Torvalds 
894e7a409c3SEric Dumazet 	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
895e7a409c3SEric Dumazet 		     &state);
8961da177e4SLinus Torvalds 
8971da177e4SLinus Torvalds 	/*
8981da177e4SLinus Torvalds 	 *	Keep copying data until we run out.
8991da177e4SLinus Torvalds 	 */
9001da177e4SLinus Torvalds 
901065ff79fSPablo Neira Ayuso 	while (state.left > 0) {
90219c3401aSPablo Neira Ayuso 		bool first_frag = (state.offset == 0);
90319c3401aSPablo Neira Ayuso 
904065ff79fSPablo Neira Ayuso 		skb2 = ip_frag_next(skb, &state);
905065ff79fSPablo Neira Ayuso 		if (IS_ERR(skb2)) {
906065ff79fSPablo Neira Ayuso 			err = PTR_ERR(skb2);
9071da177e4SLinus Torvalds 			goto fail;
9081da177e4SLinus Torvalds 		}
909faf482caSYajun Deng 		ip_frag_ipcb(skb, skb2, first_frag);
9101da177e4SLinus Torvalds 
9111da177e4SLinus Torvalds 		/*
9121da177e4SLinus Torvalds 		 *	Put this fragment into the sending queue.
9131da177e4SLinus Torvalds 		 */
914a1ac9c8aSMartin KaFai Lau 		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
915694869b3SEric W. Biederman 		err = output(net, sk, skb2);
9161da177e4SLinus Torvalds 		if (err)
9171da177e4SLinus Torvalds 			goto fail;
918dafee490SWei Dong 
91926a949dbSEric W. Biederman 		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
9201da177e4SLinus Torvalds 	}
9215d0ba55bSEric Dumazet 	consume_skb(skb);
92226a949dbSEric W. Biederman 	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
9231da177e4SLinus Torvalds 	return err;
9241da177e4SLinus Torvalds 
9251da177e4SLinus Torvalds fail:
9261da177e4SLinus Torvalds 	kfree_skb(skb);
92726a949dbSEric W. Biederman 	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
9281da177e4SLinus Torvalds 	return err;
9291da177e4SLinus Torvalds }
93049d16b23SAndy Zhou EXPORT_SYMBOL(ip_do_fragment);
9312e2f7aefSPatrick McHardy 
9321da177e4SLinus Torvalds int
9331da177e4SLinus Torvalds ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
9341da177e4SLinus Torvalds {
935f69e6d13SAl Viro 	struct msghdr *msg = from;
9361da177e4SLinus Torvalds 
93784fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
9380b62fca2SAl Viro 		if (!copy_from_iter_full(to, len, &msg->msg_iter))
9391da177e4SLinus Torvalds 			return -EFAULT;
9401da177e4SLinus Torvalds 	} else {
94144bb9363SAl Viro 		__wsum csum = 0;
9420b62fca2SAl Viro 		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
9431da177e4SLinus Torvalds 			return -EFAULT;
9441da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, csum, odd);
9451da177e4SLinus Torvalds 	}
9461da177e4SLinus Torvalds 	return 0;
9471da177e4SLinus Torvalds }
9484bc2f18bSEric Dumazet EXPORT_SYMBOL(ip_generic_getfrag);
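/*
 * Illustrative sketch (hypothetical callback, not part of this file): any
 * getfrag callback passed to the append-data machinery must copy 'len'
 * bytes starting at 'offset' from its private cookie 'from' into 'to',
 * and, when the skb is not CHECKSUM_PARTIAL, fold the checksum of the
 * copied block into skb->csum at block offset 'odd', exactly as
 * ip_generic_getfrag() does for user iovecs.  A callback reading from a
 * linear kernel buffer could look like:
 *
 *	static int example_kernel_getfrag(void *from, char *to, int offset,
 *					  int len, int odd, struct sk_buff *skb)
 *	{
 *		const char *buf = from;
 *
 *		memcpy(to, buf + offset, len);
 *		if (skb->ip_summed != CHECKSUM_PARTIAL)
 *			skb->csum = csum_block_add(skb->csum,
 *						   csum_partial(to, len, 0),
 *						   odd);
 *		return 0;
 *	}
 *
 * icmp_glue_bits() in net/ipv4/icmp.c is an in-tree callback of this kind.
 */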
9491da177e4SLinus Torvalds 
950f5fca608SDavid S. Miller static int __ip_append_data(struct sock *sk,
951f5fca608SDavid S. Miller 			    struct flowi4 *fl4,
952f5fca608SDavid S. Miller 			    struct sk_buff_head *queue,
9531470ddf7SHerbert Xu 			    struct inet_cork *cork,
9545640f768SEric Dumazet 			    struct page_frag *pfrag,
9551470ddf7SHerbert Xu 			    int getfrag(void *from, char *to, int offset,
9561470ddf7SHerbert Xu 					int len, int odd, struct sk_buff *skb),
9571da177e4SLinus Torvalds 			    void *from, int length, int transhdrlen,
9581da177e4SLinus Torvalds 			    unsigned int flags)
9591da177e4SLinus Torvalds {
9601da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
961b5947e5dSWillem de Bruijn 	struct ubuf_info *uarg = NULL;
9621da177e4SLinus Torvalds 	struct sk_buff *skb;
96307df5294SHerbert Xu 	struct ip_options *opt = cork->opt;
9641da177e4SLinus Torvalds 	int hh_len;
9651da177e4SLinus Torvalds 	int exthdrlen;
9661da177e4SLinus Torvalds 	int mtu;
9671da177e4SLinus Torvalds 	int copy;
9681da177e4SLinus Torvalds 	int err;
9691da177e4SLinus Torvalds 	int offset = 0;
9708eb77cc7SPavel Begunkov 	bool zc = false;
971daba287bSHannes Frederic Sowa 	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
9721da177e4SLinus Torvalds 	int csummode = CHECKSUM_NONE;
9731470ddf7SHerbert Xu 	struct rtable *rt = (struct rtable *)cork->dst;
974694aba69SEric Dumazet 	unsigned int wmem_alloc_delta = 0;
975100f6d8eSWillem de Bruijn 	bool paged, extra_uref = false;
97609c2d251SWillem de Bruijn 	u32 tskey = 0;
9771da177e4SLinus Torvalds 
97896d7303eSSteffen Klassert 	skb = skb_peek_tail(queue);
97996d7303eSSteffen Klassert 
98096d7303eSSteffen Klassert 	exthdrlen = !skb ? rt->dst.header_len : 0;
981bec1f6f6SWillem de Bruijn 	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
98215e36f5bSWillem de Bruijn 	paged = !!cork->gso_size;
983bec1f6f6SWillem de Bruijn 
9848ca5a579SVadim Fedorenko 	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
98509c2d251SWillem de Bruijn 	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
986a1cdec57SEric Dumazet 		tskey = atomic_inc_return(&sk->sk_tskey) - 1;
9871470ddf7SHerbert Xu 
988d8d1f30bSChangli Gao 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
9891da177e4SLinus Torvalds 
9901da177e4SLinus Torvalds 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
9911da177e4SLinus Torvalds 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
992cbc08a33SMiaohe Lin 	maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
9931da177e4SLinus Torvalds 
994daba287bSHannes Frederic Sowa 	if (cork->length + length > maxnonfragsize - fragheaderlen) {
995f5fca608SDavid S. Miller 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
99661e7f09dSHannes Frederic Sowa 			       mtu - (opt ? opt->optlen : 0));
9971da177e4SLinus Torvalds 		return -EMSGSIZE;
9981da177e4SLinus Torvalds 	}
9991da177e4SLinus Torvalds 
10001da177e4SLinus Torvalds 	/*
10011da177e4SLinus Torvalds 	 * transhdrlen > 0 means that this is the first fragment and we wish
10021da177e4SLinus Torvalds 	 * that it will not be fragmented later.
10031da177e4SLinus Torvalds 	 */
10041da177e4SLinus Torvalds 	if (transhdrlen &&
10051da177e4SLinus Torvalds 	    length + fragheaderlen <= mtu &&
1006c8cd0989STom Herbert 	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
1007bec1f6f6SWillem de Bruijn 	    (!(flags & MSG_MORE) || cork->gso_size) &&
1008cd027a54SJacek Kalwas 	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
100984fa7933SPatrick McHardy 		csummode = CHECKSUM_PARTIAL;
10101da177e4SLinus Torvalds 
1011c445f31bSPavel Begunkov 	if ((flags & MSG_ZEROCOPY) && length) {
1012c445f31bSPavel Begunkov 		struct msghdr *msg = from;
1013c445f31bSPavel Begunkov 
1014c445f31bSPavel Begunkov 		if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
1015c445f31bSPavel Begunkov 			if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
1016c445f31bSPavel Begunkov 				return -EINVAL;
1017c445f31bSPavel Begunkov 
1018c445f31bSPavel Begunkov 			/* Leave uarg NULL if can't zerocopy, callers should
1019c445f31bSPavel Begunkov 			 * be able to handle it.
1020c445f31bSPavel Begunkov 			 */
1021c445f31bSPavel Begunkov 			if ((rt->dst.dev->features & NETIF_F_SG) &&
1022c445f31bSPavel Begunkov 			    csummode == CHECKSUM_PARTIAL) {
1023c445f31bSPavel Begunkov 				paged = true;
1024c445f31bSPavel Begunkov 				zc = true;
1025c445f31bSPavel Begunkov 				uarg = msg->msg_ubuf;
1026c445f31bSPavel Begunkov 			}
1027c445f31bSPavel Begunkov 		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
10288c793822SJonathan Lemon 			uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
1029b5947e5dSWillem de Bruijn 			if (!uarg)
1030b5947e5dSWillem de Bruijn 				return -ENOBUFS;
1031522924b5SWillem de Bruijn 			extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
1032b5947e5dSWillem de Bruijn 			if (rt->dst.dev->features & NETIF_F_SG &&
1033b5947e5dSWillem de Bruijn 			    csummode == CHECKSUM_PARTIAL) {
1034b5947e5dSWillem de Bruijn 				paged = true;
10358eb77cc7SPavel Begunkov 				zc = true;
1036b5947e5dSWillem de Bruijn 			} else {
1037e7d2b510SPavel Begunkov 				uarg_to_msgzc(uarg)->zerocopy = 0;
103852900d22SWillem de Bruijn 				skb_zcopy_set(skb, uarg, &extra_uref);
1039b5947e5dSWillem de Bruijn 			}
1040b5947e5dSWillem de Bruijn 		}
10417da0dde6SDavid Howells 	} else if ((flags & MSG_SPLICE_PAGES) && length) {
10427da0dde6SDavid Howells 		if (inet->hdrincl)
10437da0dde6SDavid Howells 			return -EPERM;
10445a6f6873SDavid Howells 		if (rt->dst.dev->features & NETIF_F_SG &&
10455a6f6873SDavid Howells 		    getfrag == ip_generic_getfrag)
10467da0dde6SDavid Howells 			/* We need an empty buffer to attach stuff to */
10477da0dde6SDavid Howells 			paged = true;
10487da0dde6SDavid Howells 		else
10497da0dde6SDavid Howells 			flags &= ~MSG_SPLICE_PAGES;
1050c445f31bSPavel Begunkov 	}
1051b5947e5dSWillem de Bruijn 
10521470ddf7SHerbert Xu 	cork->length += length;
10531da177e4SLinus Torvalds 
10541da177e4SLinus Torvalds 	/* So, what's going on in the loop below?
10551da177e4SLinus Torvalds 	 *
10561da177e4SLinus Torvalds 	 * We use the calculated fragment length to generate a chained skb;
10571da177e4SLinus Torvalds 	 * each of its segments is an IP fragment ready for sending to the
10581da177e4SLinus Torvalds 	 * network once an appropriate IP header has been added.
10591da177e4SLinus Torvalds 	 */
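	/* Each pass either appends to the tail skb of the queue (copy > 0) or,
	 * when the current fragment is full, jumps to alloc_new_skb to start
	 * the next one.
	 */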
10601da177e4SLinus Torvalds 
106126cde9f7SHerbert Xu 	if (!skb)
10621da177e4SLinus Torvalds 		goto alloc_new_skb;
10631da177e4SLinus Torvalds 
10641da177e4SLinus Torvalds 	while (length > 0) {
10651da177e4SLinus Torvalds 		/* Check if the remaining data fits into current packet. */
10661da177e4SLinus Torvalds 		copy = mtu - skb->len;
10671da177e4SLinus Torvalds 		if (copy < length)
10681da177e4SLinus Torvalds 			copy = maxfraglen - skb->len;
10691da177e4SLinus Torvalds 		if (copy <= 0) {
10701da177e4SLinus Torvalds 			char *data;
10711da177e4SLinus Torvalds 			unsigned int datalen;
10721da177e4SLinus Torvalds 			unsigned int fraglen;
10731da177e4SLinus Torvalds 			unsigned int fraggap;
10746d123b81SJakub Kicinski 			unsigned int alloclen, alloc_extra;
1075aba36930SWillem de Bruijn 			unsigned int pagedlen;
10761da177e4SLinus Torvalds 			struct sk_buff *skb_prev;
10771da177e4SLinus Torvalds alloc_new_skb:
10781da177e4SLinus Torvalds 			skb_prev = skb;
10791da177e4SLinus Torvalds 			if (skb_prev)
10801da177e4SLinus Torvalds 				fraggap = skb_prev->len - maxfraglen;
10811da177e4SLinus Torvalds 			else
10821da177e4SLinus Torvalds 				fraggap = 0;
10831da177e4SLinus Torvalds 
10841da177e4SLinus Torvalds 			/*
10851da177e4SLinus Torvalds 			 * If remaining data exceeds the mtu,
10861da177e4SLinus Torvalds 			 * we know we need more fragment(s).
10871da177e4SLinus Torvalds 			 */
10881da177e4SLinus Torvalds 			datalen = length + fraggap;
10891da177e4SLinus Torvalds 			if (datalen > mtu - fragheaderlen)
10901da177e4SLinus Torvalds 				datalen = maxfraglen - fragheaderlen;
10911da177e4SLinus Torvalds 			fraglen = datalen + fragheaderlen;
1092aba36930SWillem de Bruijn 			pagedlen = 0;
10931da177e4SLinus Torvalds 
10946d123b81SJakub Kicinski 			alloc_extra = hh_len + 15;
10956d123b81SJakub Kicinski 			alloc_extra += exthdrlen;
1096353e5c9aSSteffen Klassert 
10971da177e4SLinus Torvalds 			/* The last fragment gets additional space at tail.
10981da177e4SLinus Torvalds 			 * Note: with MSG_MORE we overallocate on fragments,
10991da177e4SLinus Torvalds 			 * because we have no idea which fragment will be
11001da177e4SLinus Torvalds 			 * the last.
11011da177e4SLinus Torvalds 			 */
110233f99dc7SSteffen Klassert 			if (datalen == length + fraggap)
11036d123b81SJakub Kicinski 				alloc_extra += rt->dst.trailer_len;
11046d123b81SJakub Kicinski 
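			/* Three allocation strategies: a full MTU for non-SG
			 * devices when more data is coming, the whole fragment
			 * in the linear area when it fits there, or only the
			 * headers with the remainder (pagedlen) going into
			 * page frags on SG-capable devices.
			 */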
11056d123b81SJakub Kicinski 			if ((flags & MSG_MORE) &&
11066d123b81SJakub Kicinski 			    !(rt->dst.dev->features&NETIF_F_SG))
11076d123b81SJakub Kicinski 				alloclen = mtu;
11086d123b81SJakub Kicinski 			else if (!paged &&
11096d123b81SJakub Kicinski 				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
11106d123b81SJakub Kicinski 				  !(rt->dst.dev->features & NETIF_F_SG)))
11116d123b81SJakub Kicinski 				alloclen = fraglen;
111247cf8899SPavel Begunkov 			else {
11138eb77cc7SPavel Begunkov 				alloclen = fragheaderlen + transhdrlen;
11148eb77cc7SPavel Begunkov 				pagedlen = datalen - transhdrlen;
11156d123b81SJakub Kicinski 			}
11166d123b81SJakub Kicinski 
11176d123b81SJakub Kicinski 			alloclen += alloc_extra;
111833f99dc7SSteffen Klassert 
11191da177e4SLinus Torvalds 			if (transhdrlen) {
11206d123b81SJakub Kicinski 				skb = sock_alloc_send_skb(sk, alloclen,
11211da177e4SLinus Torvalds 						(flags & MSG_DONTWAIT), &err);
11221da177e4SLinus Torvalds 			} else {
11231da177e4SLinus Torvalds 				skb = NULL;
1124694aba69SEric Dumazet 				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
11251da177e4SLinus Torvalds 				    2 * sk->sk_sndbuf)
11266d123b81SJakub Kicinski 					skb = alloc_skb(alloclen,
11271da177e4SLinus Torvalds 							sk->sk_allocation);
112851456b29SIan Morris 				if (unlikely(!skb))
11291da177e4SLinus Torvalds 					err = -ENOBUFS;
11301da177e4SLinus Torvalds 			}
113151456b29SIan Morris 			if (!skb)
11321da177e4SLinus Torvalds 				goto error;
11331da177e4SLinus Torvalds 
11341da177e4SLinus Torvalds 			/*
11351da177e4SLinus Torvalds 			 *	Fill in the control structures
11361da177e4SLinus Torvalds 			 */
11371da177e4SLinus Torvalds 			skb->ip_summed = csummode;
11381da177e4SLinus Torvalds 			skb->csum = 0;
11391da177e4SLinus Torvalds 			skb_reserve(skb, hh_len);
114011878b40SWillem de Bruijn 
11411da177e4SLinus Torvalds 			/*
11421da177e4SLinus Torvalds 			 *	Find where to start putting bytes.
11431da177e4SLinus Torvalds 			 */
114415e36f5bSWillem de Bruijn 			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
1145c14d2450SArnaldo Carvalho de Melo 			skb_set_network_header(skb, exthdrlen);
1146b0e380b1SArnaldo Carvalho de Melo 			skb->transport_header = (skb->network_header +
1147b0e380b1SArnaldo Carvalho de Melo 						 fragheaderlen);
1148353e5c9aSSteffen Klassert 			data += fragheaderlen + exthdrlen;
11491da177e4SLinus Torvalds 
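			/* fraggap bytes overflowed past maxfraglen in the
			 * previous skb: move them (and their checksum
			 * contribution) into this fragment and trim the
			 * previous skb back to maxfraglen.
			 */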
11501da177e4SLinus Torvalds 			if (fraggap) {
11511da177e4SLinus Torvalds 				skb->csum = skb_copy_and_csum_bits(
11521da177e4SLinus Torvalds 					skb_prev, maxfraglen,
11538d5930dfSAl Viro 					data + transhdrlen, fraggap);
11541da177e4SLinus Torvalds 				skb_prev->csum = csum_sub(skb_prev->csum,
11551da177e4SLinus Torvalds 							  skb->csum);
11561da177e4SLinus Torvalds 				data += fraggap;
1157e9fa4f7bSHerbert Xu 				pskb_trim_unique(skb_prev, maxfraglen);
11581da177e4SLinus Torvalds 			}
11591da177e4SLinus Torvalds 
116015e36f5bSWillem de Bruijn 			copy = datalen - transhdrlen - fraggap - pagedlen;
11611da177e4SLinus Torvalds 			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
11621da177e4SLinus Torvalds 				err = -EFAULT;
11631da177e4SLinus Torvalds 				kfree_skb(skb);
11641da177e4SLinus Torvalds 				goto error;
11651da177e4SLinus Torvalds 			}
11661da177e4SLinus Torvalds 
11671da177e4SLinus Torvalds 			offset += copy;
116815e36f5bSWillem de Bruijn 			length -= copy + transhdrlen;
11691da177e4SLinus Torvalds 			transhdrlen = 0;
11701da177e4SLinus Torvalds 			exthdrlen = 0;
11711da177e4SLinus Torvalds 			csummode = CHECKSUM_NONE;
11721da177e4SLinus Torvalds 
117352900d22SWillem de Bruijn 			/* only the initial fragment is time stamped */
117452900d22SWillem de Bruijn 			skb_shinfo(skb)->tx_flags = cork->tx_flags;
117552900d22SWillem de Bruijn 			cork->tx_flags = 0;
117652900d22SWillem de Bruijn 			skb_shinfo(skb)->tskey = tskey;
117752900d22SWillem de Bruijn 			tskey = 0;
117852900d22SWillem de Bruijn 			skb_zcopy_set(skb, uarg, &extra_uref);
117952900d22SWillem de Bruijn 
11800dec879fSJulian Anastasov 			if ((flags & MSG_CONFIRM) && !skb_prev)
11810dec879fSJulian Anastasov 				skb_set_dst_pending_confirm(skb, 1);
11820dec879fSJulian Anastasov 
11831da177e4SLinus Torvalds 			/*
11841da177e4SLinus Torvalds 			 * Put the packet on the pending queue.
11851da177e4SLinus Torvalds 			 */
1186694aba69SEric Dumazet 			if (!skb->destructor) {
1187694aba69SEric Dumazet 				skb->destructor = sock_wfree;
1188694aba69SEric Dumazet 				skb->sk = sk;
1189694aba69SEric Dumazet 				wmem_alloc_delta += skb->truesize;
1190694aba69SEric Dumazet 			}
11911470ddf7SHerbert Xu 			__skb_queue_tail(queue, skb);
11921da177e4SLinus Torvalds 			continue;
11931da177e4SLinus Torvalds 		}
11941da177e4SLinus Torvalds 
11951da177e4SLinus Torvalds 		if (copy > length)
11961da177e4SLinus Torvalds 			copy = length;
11971da177e4SLinus Torvalds 
1198113f99c3SWillem de Bruijn 		if (!(rt->dst.dev->features&NETIF_F_SG) &&
1199113f99c3SWillem de Bruijn 		    skb_tailroom(skb) >= copy) {
12001da177e4SLinus Torvalds 			unsigned int off;
12011da177e4SLinus Torvalds 
12021da177e4SLinus Torvalds 			off = skb->len;
12031da177e4SLinus Torvalds 			if (getfrag(from, skb_put(skb, copy),
12041da177e4SLinus Torvalds 					offset, copy, off, skb) < 0) {
12051da177e4SLinus Torvalds 				__skb_trim(skb, off);
12061da177e4SLinus Torvalds 				err = -EFAULT;
12071da177e4SLinus Torvalds 				goto error;
12081da177e4SLinus Torvalds 			}
12097da0dde6SDavid Howells 		} else if (flags & MSG_SPLICE_PAGES) {
12107da0dde6SDavid Howells 			struct msghdr *msg = from;
12117da0dde6SDavid Howells 
12127da0dde6SDavid Howells 			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
12137da0dde6SDavid Howells 						   sk->sk_allocation);
12147da0dde6SDavid Howells 			if (err < 0)
12157da0dde6SDavid Howells 				goto error;
12167da0dde6SDavid Howells 			copy = err;
12177da0dde6SDavid Howells 			wmem_alloc_delta += copy;
1218c445f31bSPavel Begunkov 		} else if (!zc) {
12191da177e4SLinus Torvalds 			int i = skb_shinfo(skb)->nr_frags;
12201da177e4SLinus Torvalds 
12211da177e4SLinus Torvalds 			err = -ENOMEM;
12225640f768SEric Dumazet 			if (!sk_page_frag_refill(sk, pfrag))
12231da177e4SLinus Torvalds 				goto error;
12241da177e4SLinus Torvalds 
1225c445f31bSPavel Begunkov 			skb_zcopy_downgrade_managed(skb);
12265640f768SEric Dumazet 			if (!skb_can_coalesce(skb, i, pfrag->page,
12275640f768SEric Dumazet 					      pfrag->offset)) {
12281da177e4SLinus Torvalds 				err = -EMSGSIZE;
12295640f768SEric Dumazet 				if (i == MAX_SKB_FRAGS)
12301da177e4SLinus Torvalds 					goto error;
12315640f768SEric Dumazet 
12325640f768SEric Dumazet 				__skb_fill_page_desc(skb, i, pfrag->page,
12335640f768SEric Dumazet 						     pfrag->offset, 0);
12345640f768SEric Dumazet 				skb_shinfo(skb)->nr_frags = ++i;
12355640f768SEric Dumazet 				get_page(pfrag->page);
12361da177e4SLinus Torvalds 			}
12375640f768SEric Dumazet 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
12385640f768SEric Dumazet 			if (getfrag(from,
12395640f768SEric Dumazet 				    page_address(pfrag->page) + pfrag->offset,
12405640f768SEric Dumazet 				    offset, copy, skb->len, skb) < 0)
12415640f768SEric Dumazet 				goto error_efault;
12425640f768SEric Dumazet 
12435640f768SEric Dumazet 			pfrag->offset += copy;
12445640f768SEric Dumazet 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1245ede57d58SRichard Gobert 			skb_len_add(skb, copy);
1246694aba69SEric Dumazet 			wmem_alloc_delta += copy;
1247b5947e5dSWillem de Bruijn 		} else {
1248b5947e5dSWillem de Bruijn 			err = skb_zerocopy_iter_dgram(skb, from, copy);
1249b5947e5dSWillem de Bruijn 			if (err < 0)
1250b5947e5dSWillem de Bruijn 				goto error;
12511da177e4SLinus Torvalds 		}
12521da177e4SLinus Torvalds 		offset += copy;
12531da177e4SLinus Torvalds 		length -= copy;
12541da177e4SLinus Torvalds 	}
12551da177e4SLinus Torvalds 
12569e8445a5SPaolo Abeni 	if (wmem_alloc_delta)
1257694aba69SEric Dumazet 		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
12581da177e4SLinus Torvalds 	return 0;
12591da177e4SLinus Torvalds 
12605640f768SEric Dumazet error_efault:
12615640f768SEric Dumazet 	err = -EFAULT;
12621da177e4SLinus Torvalds error:
12638e044917SJonathan Lemon 	net_zcopy_put_abort(uarg, extra_uref);
12641470ddf7SHerbert Xu 	cork->length -= length;
12655e38e270SPavel Emelyanov 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1266694aba69SEric Dumazet 	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
12671da177e4SLinus Torvalds 	return err;
12681da177e4SLinus Torvalds }
12691da177e4SLinus Torvalds 
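/*
 *	Prepare the cork for a new pending datagram: copy the IP options,
 *	derive the fragment size from the socket's PMTU setting, and take
 *	over the caller's route reference.
 */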
12701470ddf7SHerbert Xu static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
12711470ddf7SHerbert Xu 			 struct ipcm_cookie *ipc, struct rtable **rtp)
12721470ddf7SHerbert Xu {
1273f6d8bd05SEric Dumazet 	struct ip_options_rcu *opt;
12741470ddf7SHerbert Xu 	struct rtable *rt;
12751470ddf7SHerbert Xu 
12769783ccd0SGao Feng 	rt = *rtp;
12779783ccd0SGao Feng 	if (unlikely(!rt))
12789783ccd0SGao Feng 		return -EFAULT;
12799783ccd0SGao Feng 
12801470ddf7SHerbert Xu 	/*
12811470ddf7SHerbert Xu 	 * setup for corking.
12821470ddf7SHerbert Xu 	 */
12831470ddf7SHerbert Xu 	opt = ipc->opt;
12841470ddf7SHerbert Xu 	if (opt) {
128551456b29SIan Morris 		if (!cork->opt) {
12861470ddf7SHerbert Xu 			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
12871470ddf7SHerbert Xu 					    sk->sk_allocation);
128851456b29SIan Morris 			if (unlikely(!cork->opt))
12891470ddf7SHerbert Xu 				return -ENOBUFS;
12901470ddf7SHerbert Xu 		}
1291f6d8bd05SEric Dumazet 		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
12921470ddf7SHerbert Xu 		cork->flags |= IPCORK_OPT;
12931470ddf7SHerbert Xu 		cork->addr = ipc->addr;
12941470ddf7SHerbert Xu 	}
12959783ccd0SGao Feng 
1296482fc609SHannes Frederic Sowa 	cork->fragsize = ip_sk_use_pmtu(sk) ?
1297501a90c9SEric Dumazet 			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
1298501a90c9SEric Dumazet 
1299501a90c9SEric Dumazet 	if (!inetdev_valid_mtu(cork->fragsize))
1300501a90c9SEric Dumazet 		return -ENETUNREACH;
1301bec1f6f6SWillem de Bruijn 
1302fbf47813SWillem de Bruijn 	cork->gso_size = ipc->gso_size;
1303501a90c9SEric Dumazet 
13041470ddf7SHerbert Xu 	cork->dst = &rt->dst;
1305501a90c9SEric Dumazet 	/* We stole this route, caller should not release it. */
1306501a90c9SEric Dumazet 	*rtp = NULL;
1307501a90c9SEric Dumazet 
13081470ddf7SHerbert Xu 	cork->length = 0;
1309aa661581SFrancesco Fusco 	cork->ttl = ipc->ttl;
1310aa661581SFrancesco Fusco 	cork->tos = ipc->tos;
1311c6af0c22SWillem de Bruijn 	cork->mark = ipc->sockc.mark;
1312aa661581SFrancesco Fusco 	cork->priority = ipc->priority;
1313bc969a97SJesus Sanchez-Palencia 	cork->transmit_time = ipc->sockc.transmit_time;
1314678ca42dSWillem de Bruijn 	cork->tx_flags = 0;
1315678ca42dSWillem de Bruijn 	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
13161470ddf7SHerbert Xu 
13171470ddf7SHerbert Xu 	return 0;
13181470ddf7SHerbert Xu }
13191470ddf7SHerbert Xu 
13201470ddf7SHerbert Xu /*
1321c49cf266SDavid Howells  *	ip_append_data() can make one large IP datagram from many pieces of
1322c49cf266SDavid Howells  *	data.  Each piece will be held on the socket until
1323c49cf266SDavid Howells  *	ip_push_pending_frames() is called. Each piece can be a page or
1324c49cf266SDavid Howells  *	non-page data.
13251470ddf7SHerbert Xu  *
13261470ddf7SHerbert Xu  *	Transport protocols other than UDP - e.g. raw sockets - can
13271470ddf7SHerbert Xu  *	potentially use this interface as well.
13281470ddf7SHerbert Xu  *
13291470ddf7SHerbert Xu  *	LATER: length must be adjusted by tail padding, when required.
13301470ddf7SHerbert Xu  */
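/*
 *	Illustrative sketch only (not taken from any in-tree caller): a
 *	datagram protocol would typically drive this interface roughly as
 *	follows, assuming fl4, ipc and rt have already been prepared and the
 *	payload comes from a struct msghdr via ip_generic_getfrag():
 *
 *		err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len,
 *				     transhdrlen, ipc, &rt, msg->msg_flags);
 *		if (err)
 *			ip_flush_pending_frames(sk);
 *		else if (!(msg->msg_flags & MSG_MORE))
 *			err = ip_push_pending_frames(sk, fl4);
 */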
1331f5fca608SDavid S. Miller int ip_append_data(struct sock *sk, struct flowi4 *fl4,
13321470ddf7SHerbert Xu 		   int getfrag(void *from, char *to, int offset, int len,
13331470ddf7SHerbert Xu 			       int odd, struct sk_buff *skb),
13341470ddf7SHerbert Xu 		   void *from, int length, int transhdrlen,
13351470ddf7SHerbert Xu 		   struct ipcm_cookie *ipc, struct rtable **rtp,
13361470ddf7SHerbert Xu 		   unsigned int flags)
13371470ddf7SHerbert Xu {
13381470ddf7SHerbert Xu 	struct inet_sock *inet = inet_sk(sk);
13391470ddf7SHerbert Xu 	int err;
13401470ddf7SHerbert Xu 
13411470ddf7SHerbert Xu 	if (flags&MSG_PROBE)
13421470ddf7SHerbert Xu 		return 0;
13431470ddf7SHerbert Xu 
13441470ddf7SHerbert Xu 	if (skb_queue_empty(&sk->sk_write_queue)) {
1345bdc712b4SDavid S. Miller 		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
13461470ddf7SHerbert Xu 		if (err)
13471470ddf7SHerbert Xu 			return err;
13481470ddf7SHerbert Xu 	} else {
13491470ddf7SHerbert Xu 		transhdrlen = 0;
13501470ddf7SHerbert Xu 	}
13511470ddf7SHerbert Xu 
13525640f768SEric Dumazet 	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
13535640f768SEric Dumazet 				sk_page_frag(sk), getfrag,
13541470ddf7SHerbert Xu 				from, length, transhdrlen, flags);
13551470ddf7SHerbert Xu }
13561470ddf7SHerbert Xu 
13571470ddf7SHerbert Xu static void ip_cork_release(struct inet_cork *cork)
1358429f08e9SPavel Emelyanov {
13591470ddf7SHerbert Xu 	cork->flags &= ~IPCORK_OPT;
13601470ddf7SHerbert Xu 	kfree(cork->opt);
13611470ddf7SHerbert Xu 	cork->opt = NULL;
13621470ddf7SHerbert Xu 	dst_release(cork->dst);
13631470ddf7SHerbert Xu 	cork->dst = NULL;
1364429f08e9SPavel Emelyanov }
1365429f08e9SPavel Emelyanov 
13661da177e4SLinus Torvalds /*
13671da177e4SLinus Torvalds  *	Combine all pending IP fragments on the socket into one IP datagram
13681da177e4SLinus Torvalds  *	and push them out.
13691da177e4SLinus Torvalds  */
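/*
 *	The queued skbs are chained onto the head skb's frag_list, so the
 *	single IP header built here describes the complete datagram.
 */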
13701c32c5adSHerbert Xu struct sk_buff *__ip_make_skb(struct sock *sk,
137177968b78SDavid S. Miller 			      struct flowi4 *fl4,
13721470ddf7SHerbert Xu 			      struct sk_buff_head *queue,
13731470ddf7SHerbert Xu 			      struct inet_cork *cork)
13741da177e4SLinus Torvalds {
13751da177e4SLinus Torvalds 	struct sk_buff *skb, *tmp_skb;
13761da177e4SLinus Torvalds 	struct sk_buff **tail_skb;
13771da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
13780388b004SPavel Emelyanov 	struct net *net = sock_net(sk);
13791da177e4SLinus Torvalds 	struct ip_options *opt = NULL;
13801470ddf7SHerbert Xu 	struct rtable *rt = (struct rtable *)cork->dst;
13811da177e4SLinus Torvalds 	struct iphdr *iph;
138276ab608dSAlexey Dobriyan 	__be16 df = 0;
13831da177e4SLinus Torvalds 	__u8 ttl;
13841da177e4SLinus Torvalds 
138551456b29SIan Morris 	skb = __skb_dequeue(queue);
138651456b29SIan Morris 	if (!skb)
13871da177e4SLinus Torvalds 		goto out;
13881da177e4SLinus Torvalds 	tail_skb = &(skb_shinfo(skb)->frag_list);
13891da177e4SLinus Torvalds 
13901da177e4SLinus Torvalds 	/* move skb->data to ip header from ext header */
1391d56f90a7SArnaldo Carvalho de Melo 	if (skb->data < skb_network_header(skb))
1392bbe735e4SArnaldo Carvalho de Melo 		__skb_pull(skb, skb_network_offset(skb));
13931470ddf7SHerbert Xu 	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1394cfe1fc77SArnaldo Carvalho de Melo 		__skb_pull(tmp_skb, skb_network_header_len(skb));
13951da177e4SLinus Torvalds 		*tail_skb = tmp_skb;
13961da177e4SLinus Torvalds 		tail_skb = &(tmp_skb->next);
13971da177e4SLinus Torvalds 		skb->len += tmp_skb->len;
13981da177e4SLinus Torvalds 		skb->data_len += tmp_skb->len;
13991da177e4SLinus Torvalds 		skb->truesize += tmp_skb->truesize;
14001da177e4SLinus Torvalds 		tmp_skb->destructor = NULL;
14011da177e4SLinus Torvalds 		tmp_skb->sk = NULL;
14021da177e4SLinus Torvalds 	}
14031da177e4SLinus Torvalds 
14041da177e4SLinus Torvalds 	/* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO), we
14051da177e4SLinus Torvalds 	 * allow the frame generated here to be fragmented.  No matter how
14061da177e4SLinus Torvalds 	 * transforms change the size of the packet, it will go out.
14071da177e4SLinus Torvalds 	 */
140860ff7467SWANG Cong 	skb->ignore_df = ip_sk_ignore_df(sk);
14091da177e4SLinus Torvalds 
14101da177e4SLinus Torvalds 	/* DF bit is set when we want to see DF on outgoing frames.
141160ff7467SWANG Cong 	 * If ignore_df is also set, we still allow this frame to be
14121da177e4SLinus Torvalds 	 * fragmented locally. */
1413482fc609SHannes Frederic Sowa 	if (inet->pmtudisc == IP_PMTUDISC_DO ||
1414482fc609SHannes Frederic Sowa 	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
1415d8d1f30bSChangli Gao 	    (skb->len <= dst_mtu(&rt->dst) &&
1416d8d1f30bSChangli Gao 	     ip_dont_fragment(sk, &rt->dst)))
14171da177e4SLinus Torvalds 		df = htons(IP_DF);
14181da177e4SLinus Torvalds 
14191470ddf7SHerbert Xu 	if (cork->flags & IPCORK_OPT)
14201470ddf7SHerbert Xu 		opt = cork->opt;
14211da177e4SLinus Torvalds 
1422aa661581SFrancesco Fusco 	if (cork->ttl != 0)
1423aa661581SFrancesco Fusco 		ttl = cork->ttl;
1424aa661581SFrancesco Fusco 	else if (rt->rt_type == RTN_MULTICAST)
14251da177e4SLinus Torvalds 		ttl = inet->mc_ttl;
14261da177e4SLinus Torvalds 	else
1427d8d1f30bSChangli Gao 		ttl = ip_select_ttl(inet, &rt->dst);
14281da177e4SLinus Torvalds 
1429749154aaSAnsis Atteka 	iph = ip_hdr(skb);
14301da177e4SLinus Torvalds 	iph->version = 4;
14311da177e4SLinus Torvalds 	iph->ihl = 5;
1432aa661581SFrancesco Fusco 	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
14331da177e4SLinus Torvalds 	iph->frag_off = df;
14341da177e4SLinus Torvalds 	iph->ttl = ttl;
14351da177e4SLinus Torvalds 	iph->protocol = sk->sk_protocol;
143684f9307cSEric Dumazet 	ip_copy_addrs(iph, fl4);
1437b6a7719aSHannes Frederic Sowa 	ip_select_ident(net, skb, sk);
14381da177e4SLinus Torvalds 
143922f728f8SDavid S. Miller 	if (opt) {
144022f728f8SDavid S. Miller 		iph->ihl += opt->optlen >> 2;
14414f0e3040SJakub Kicinski 		ip_options_build(skb, opt, cork->addr, rt);
144222f728f8SDavid S. Miller 	}
144322f728f8SDavid S. Miller 
1444aa661581SFrancesco Fusco 	skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
1445c6af0c22SWillem de Bruijn 	skb->mark = cork->mark;
1446bc969a97SJesus Sanchez-Palencia 	skb->tstamp = cork->transmit_time;
1447a21bba94SEric Dumazet 	/*
1448a21bba94SEric Dumazet 	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1449a21bba94SEric Dumazet 	 * on dst refcount
1450a21bba94SEric Dumazet 	 */
14511470ddf7SHerbert Xu 	cork->dst = NULL;
1452d8d1f30bSChangli Gao 	skb_dst_set(skb, &rt->dst);
14531da177e4SLinus Torvalds 
145499e5acaeSZiyang Xuan 	if (iph->protocol == IPPROTO_ICMP) {
145599e5acaeSZiyang Xuan 		u8 icmp_type;
145699e5acaeSZiyang Xuan 
145799e5acaeSZiyang Xuan 		/* For such sockets, transhdrlen is zero when ip_append_data() is
145899e5acaeSZiyang Xuan 		 * called, so the icmphdr is not in the skb linear region and the
145999e5acaeSZiyang Xuan 		 * icmp type cannot be read via icmp_hdr(skb)->type.
146099e5acaeSZiyang Xuan 		 */
146199e5acaeSZiyang Xuan 		if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
146299e5acaeSZiyang Xuan 			icmp_type = fl4->fl4_icmp_type;
146399e5acaeSZiyang Xuan 		else
146499e5acaeSZiyang Xuan 			icmp_type = icmp_hdr(skb)->type;
146599e5acaeSZiyang Xuan 		icmp_out_count(net, icmp_type);
146699e5acaeSZiyang Xuan 	}
146796793b48SDavid L Stevens 
14681c32c5adSHerbert Xu 	ip_cork_release(cork);
14691c32c5adSHerbert Xu out:
14701c32c5adSHerbert Xu 	return skb;
14711c32c5adSHerbert Xu }
14721c32c5adSHerbert Xu 
1473b5ec8eeaSEric Dumazet int ip_send_skb(struct net *net, struct sk_buff *skb)
14741c32c5adSHerbert Xu {
14751c32c5adSHerbert Xu 	int err;
14761c32c5adSHerbert Xu 
147733224b16SEric W. Biederman 	err = ip_local_out(net, skb->sk, skb);
14781da177e4SLinus Torvalds 	if (err) {
14791da177e4SLinus Torvalds 		if (err > 0)
14806ce9e7b5SEric Dumazet 			err = net_xmit_errno(err);
14811da177e4SLinus Torvalds 		if (err)
14821c32c5adSHerbert Xu 			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
14831da177e4SLinus Torvalds 	}
14841da177e4SLinus Torvalds 
14851da177e4SLinus Torvalds 	return err;
14861da177e4SLinus Torvalds }
14871da177e4SLinus Torvalds 
148877968b78SDavid S. Miller int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
14891470ddf7SHerbert Xu {
14901c32c5adSHerbert Xu 	struct sk_buff *skb;
14911c32c5adSHerbert Xu 
149277968b78SDavid S. Miller 	skb = ip_finish_skb(sk, fl4);
14931c32c5adSHerbert Xu 	if (!skb)
14941c32c5adSHerbert Xu 		return 0;
14951c32c5adSHerbert Xu 
14961c32c5adSHerbert Xu 	/* Netfilter gets the whole, not yet fragmented skb. */
1497b5ec8eeaSEric Dumazet 	return ip_send_skb(sock_net(sk), skb);
14981470ddf7SHerbert Xu }
14991470ddf7SHerbert Xu 
15001da177e4SLinus Torvalds /*
15011da177e4SLinus Torvalds  *	Throw away all pending data on the socket.
15021da177e4SLinus Torvalds  */
15031470ddf7SHerbert Xu static void __ip_flush_pending_frames(struct sock *sk,
15041470ddf7SHerbert Xu 				      struct sk_buff_head *queue,
15051470ddf7SHerbert Xu 				      struct inet_cork *cork)
15061da177e4SLinus Torvalds {
15071da177e4SLinus Torvalds 	struct sk_buff *skb;
15081da177e4SLinus Torvalds 
15091470ddf7SHerbert Xu 	while ((skb = __skb_dequeue_tail(queue)) != NULL)
15101da177e4SLinus Torvalds 		kfree_skb(skb);
15111da177e4SLinus Torvalds 
15121470ddf7SHerbert Xu 	ip_cork_release(cork);
15131470ddf7SHerbert Xu }
15141470ddf7SHerbert Xu 
15151470ddf7SHerbert Xu void ip_flush_pending_frames(struct sock *sk)
15161470ddf7SHerbert Xu {
1517bdc712b4SDavid S. Miller 	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
15181da177e4SLinus Torvalds }
15191da177e4SLinus Torvalds 
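/*
 *	ip_make_skb() runs the append/make steps above on a caller-supplied
 *	cork and a private queue, so a single datagram can be built without
 *	touching the socket's pending write queue.
 */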
15201c32c5adSHerbert Xu struct sk_buff *ip_make_skb(struct sock *sk,
152177968b78SDavid S. Miller 			    struct flowi4 *fl4,
15221c32c5adSHerbert Xu 			    int getfrag(void *from, char *to, int offset,
15231c32c5adSHerbert Xu 					int len, int odd, struct sk_buff *skb),
15241c32c5adSHerbert Xu 			    void *from, int length, int transhdrlen,
15251c32c5adSHerbert Xu 			    struct ipcm_cookie *ipc, struct rtable **rtp,
15261cd7884dSWillem de Bruijn 			    struct inet_cork *cork, unsigned int flags)
15271c32c5adSHerbert Xu {
15281c32c5adSHerbert Xu 	struct sk_buff_head queue;
15291c32c5adSHerbert Xu 	int err;
15301c32c5adSHerbert Xu 
15311c32c5adSHerbert Xu 	if (flags & MSG_PROBE)
15321c32c5adSHerbert Xu 		return NULL;
15331c32c5adSHerbert Xu 
15341c32c5adSHerbert Xu 	__skb_queue_head_init(&queue);
15351c32c5adSHerbert Xu 
15361cd7884dSWillem de Bruijn 	cork->flags = 0;
15371cd7884dSWillem de Bruijn 	cork->addr = 0;
15381cd7884dSWillem de Bruijn 	cork->opt = NULL;
15391cd7884dSWillem de Bruijn 	err = ip_setup_cork(sk, cork, ipc, rtp);
15401c32c5adSHerbert Xu 	if (err)
15411c32c5adSHerbert Xu 		return ERR_PTR(err);
15421c32c5adSHerbert Xu 
15431cd7884dSWillem de Bruijn 	err = __ip_append_data(sk, fl4, &queue, cork,
15445640f768SEric Dumazet 			       &current->task_frag, getfrag,
15451c32c5adSHerbert Xu 			       from, length, transhdrlen, flags);
15461c32c5adSHerbert Xu 	if (err) {
15471cd7884dSWillem de Bruijn 		__ip_flush_pending_frames(sk, &queue, cork);
15481c32c5adSHerbert Xu 		return ERR_PTR(err);
15491c32c5adSHerbert Xu 	}
15501c32c5adSHerbert Xu 
15511cd7884dSWillem de Bruijn 	return __ip_make_skb(sk, fl4, &queue, cork);
15521c32c5adSHerbert Xu }
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds /*
15551da177e4SLinus Torvalds  *	Fetch data from kernel space and fill in checksum if needed.
15561da177e4SLinus Torvalds  */
15571da177e4SLinus Torvalds static int ip_reply_glue_bits(void *dptr, char *to, int offset,
15581da177e4SLinus Torvalds 			      int len, int odd, struct sk_buff *skb)
15591da177e4SLinus Torvalds {
15605084205fSAl Viro 	__wsum csum;
15611da177e4SLinus Torvalds 
1562cc44c17bSAl Viro 	csum = csum_partial_copy_nocheck(dptr+offset, to, len);
15631da177e4SLinus Torvalds 	skb->csum = csum_block_add(skb->csum, csum, odd);
15641da177e4SLinus Torvalds 	return 0;
15651da177e4SLinus Torvalds }
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds /*
15681da177e4SLinus Torvalds  *	Generic function to send a packet as a reply to another packet.
1569be9f4a44SEric Dumazet  *	So far it is used to send some TCP resets and ACKs.
15701da177e4SLinus Torvalds  */
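/*
 *	The original packet's IP options are echoed back, a route to the reply
 *	destination is looked up, the payload is queued with ip_append_data(),
 *	the transport checksum at arg->csumoffset is patched in, and the frame
 *	is pushed out on the supplied socket.
 */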
1571bdbbb852SEric Dumazet void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
157224a2d43dSEric Dumazet 			   const struct ip_options *sopt,
157324a2d43dSEric Dumazet 			   __be32 daddr, __be32 saddr,
157424a2d43dSEric Dumazet 			   const struct ip_reply_arg *arg,
1575c0a8966eSAntoine Tenart 			   unsigned int len, u64 transmit_time, u32 txhash)
15761da177e4SLinus Torvalds {
1577f6d8bd05SEric Dumazet 	struct ip_options_data replyopts;
15781da177e4SLinus Torvalds 	struct ipcm_cookie ipc;
157977968b78SDavid S. Miller 	struct flowi4 fl4;
1580511c3f92SEric Dumazet 	struct rtable *rt = skb_rtable(skb);
1581bdbbb852SEric Dumazet 	struct net *net = sock_net(sk);
1582be9f4a44SEric Dumazet 	struct sk_buff *nskb;
15834062090eSVasily Averin 	int err;
1584f7ba868bSDavid Ahern 	int oif;
15851da177e4SLinus Torvalds 
158691ed1e66SPaolo Abeni 	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
15871da177e4SLinus Torvalds 		return;
15881da177e4SLinus Torvalds 
158935178206SWillem de Bruijn 	ipcm_init(&ipc);
15900a5ebb80SDavid S. Miller 	ipc.addr = daddr;
1591d6fb396cSEric Dumazet 	ipc.sockc.transmit_time = transmit_time;
15921da177e4SLinus Torvalds 
1593f6d8bd05SEric Dumazet 	if (replyopts.opt.opt.optlen) {
15941da177e4SLinus Torvalds 		ipc.opt = &replyopts.opt;
15951da177e4SLinus Torvalds 
1596f6d8bd05SEric Dumazet 		if (replyopts.opt.opt.srr)
1597f6d8bd05SEric Dumazet 			daddr = replyopts.opt.opt.faddr;
15981da177e4SLinus Torvalds 	}
15991da177e4SLinus Torvalds 
1600f7ba868bSDavid Ahern 	oif = arg->bound_dev_if;
16019b6c14d5SDavid Ahern 	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
16029b6c14d5SDavid Ahern 		oif = skb->skb_iif;
1603f7ba868bSDavid Ahern 
1604f7ba868bSDavid Ahern 	flowi4_init_output(&fl4, oif,
160500483690SJon Maxwell 			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
160666b13d99SEric Dumazet 			   RT_TOS(arg->tos),
1607be9f4a44SEric Dumazet 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1608538de0e0SDavid S. Miller 			   ip_reply_arg_flowi_flags(arg),
160970e73416SDavid S. Miller 			   daddr, saddr,
1610e2d118a1SLorenzo Colitti 			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1611e2d118a1SLorenzo Colitti 			   arg->uid);
16123df98d79SPaul Moore 	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
1613e22aa148Ssewookseo 	rt = ip_route_output_flow(net, &fl4, sk);
1614b23dd4feSDavid S. Miller 	if (IS_ERR(rt))
16151da177e4SLinus Torvalds 		return;
16161da177e4SLinus Torvalds 
1617ba9e04a7SWei Wang 	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
16181da177e4SLinus Torvalds 
1619eddc9ec5SArnaldo Carvalho de Melo 	sk->sk_protocol = ip_hdr(skb)->protocol;
1620f0e48dbfSPatrick McHardy 	sk->sk_bound_dev_if = arg->bound_dev_if;
16211227c177SKuniyuki Iwashima 	sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
16220da7536fSWillem de Bruijn 	ipc.sockc.mark = fl4.flowi4_mark;
16234062090eSVasily Averin 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
16244062090eSVasily Averin 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
16254062090eSVasily Averin 	if (unlikely(err)) {
16264062090eSVasily Averin 		ip_flush_pending_frames(sk);
16274062090eSVasily Averin 		goto out;
16284062090eSVasily Averin 	}
16294062090eSVasily Averin 
1630be9f4a44SEric Dumazet 	nskb = skb_peek(&sk->sk_write_queue);
1631be9f4a44SEric Dumazet 	if (nskb) {
16321da177e4SLinus Torvalds 		if (arg->csumoffset >= 0)
1633be9f4a44SEric Dumazet 			*((__sum16 *)skb_transport_header(nskb) +
1634be9f4a44SEric Dumazet 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
16359c70220bSArnaldo Carvalho de Melo 								arg->csum));
1636be9f4a44SEric Dumazet 		nskb->ip_summed = CHECKSUM_NONE;
1637d98d58a0SMartin KaFai Lau 		nskb->mono_delivery_time = !!transmit_time;
1638c0a8966eSAntoine Tenart 		if (txhash)
1639c0a8966eSAntoine Tenart 			skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
164077968b78SDavid S. Miller 		ip_push_pending_frames(sk, &fl4);
16411da177e4SLinus Torvalds 	}
16424062090eSVasily Averin out:
16431da177e4SLinus Torvalds 	ip_rt_put(rt);
16441da177e4SLinus Torvalds }
16451da177e4SLinus Torvalds 
16461da177e4SLinus Torvalds void __init ip_init(void)
16471da177e4SLinus Torvalds {
16481da177e4SLinus Torvalds 	ip_rt_init();
16491da177e4SLinus Torvalds 	inet_initpeers();
16501da177e4SLinus Torvalds 
165172c1d3bdSWANG Cong #if defined(CONFIG_IP_MULTICAST)
165272c1d3bdSWANG Cong 	igmp_mc_init();
16531da177e4SLinus Torvalds #endif
16541da177e4SLinus Torvalds }
1655