xref: /openbmc/linux/net/ipv4/ip_output.c (revision c0a8966e2bc7d31f77a7246947ebc09c1ff06066)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
41da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
51da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  *		The Internet Protocol (IP) output module.
81da177e4SLinus Torvalds  *
902c30a84SJesper Juhl  * Authors:	Ross Biro
101da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
111da177e4SLinus Torvalds  *		Donald Becker, <becker@super.org>
121da177e4SLinus Torvalds  *		Alan Cox, <Alan.Cox@linux.org>
131da177e4SLinus Torvalds  *		Richard Underwood
141da177e4SLinus Torvalds  *		Stefan Becker, <stefanb@yello.ping.de>
151da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
161da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
171da177e4SLinus Torvalds  *		Hirokazu Takahashi, <taka@valinux.co.jp>
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  *	See ip_input.c for original log
201da177e4SLinus Torvalds  *
211da177e4SLinus Torvalds  *	Fixes:
221da177e4SLinus Torvalds  *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
231da177e4SLinus Torvalds  *		Mike Kilburn	:	htons() missing in ip_build_xmit.
241da177e4SLinus Torvalds  *		Bradford Johnson:	Fix faulty handling of some frames when
251da177e4SLinus Torvalds  *					no route is found.
261da177e4SLinus Torvalds  *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
271da177e4SLinus Torvalds  *					(in case the packet is not accepted by
281da177e4SLinus Torvalds  *					output firewall rules)
291da177e4SLinus Torvalds  *		Mike McLagan	:	Routing by source
301da177e4SLinus Torvalds  *		Alexey Kuznetsov:	use new route cache
311da177e4SLinus Torvalds  *		Andi Kleen:		Fix broken PMTU recovery and remove
321da177e4SLinus Torvalds  *					some redundant tests.
331da177e4SLinus Torvalds  *	Vitaly E. Lavrov	:	Transparent proxy revived after a year-long coma.
341da177e4SLinus Torvalds  *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
351da177e4SLinus Torvalds  *		Andi Kleen	:	Split fast and slow ip_build_xmit path
361da177e4SLinus Torvalds  *					for decreased register pressure on x86
37a66e04ceSBhaskar Chowdhury  *					and more readability.
381da177e4SLinus Torvalds  *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
391da177e4SLinus Torvalds  *					silently drop skb instead of failing with -EPERM.
401da177e4SLinus Torvalds  *		Detlev Wengorz	:	Copy protocol for fragments.
411da177e4SLinus Torvalds  *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
421da177e4SLinus Torvalds  *					datagrams.
431da177e4SLinus Torvalds  *		Hirokazu Takahashi:	sendfile() on UDP works now.
441da177e4SLinus Torvalds  */
451da177e4SLinus Torvalds 
467c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
471da177e4SLinus Torvalds #include <linux/module.h>
481da177e4SLinus Torvalds #include <linux/types.h>
491da177e4SLinus Torvalds #include <linux/kernel.h>
501da177e4SLinus Torvalds #include <linux/mm.h>
511da177e4SLinus Torvalds #include <linux/string.h>
521da177e4SLinus Torvalds #include <linux/errno.h>
53a1f8e7f7SAl Viro #include <linux/highmem.h>
545a0e3ad6STejun Heo #include <linux/slab.h>
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds #include <linux/socket.h>
571da177e4SLinus Torvalds #include <linux/sockios.h>
581da177e4SLinus Torvalds #include <linux/in.h>
591da177e4SLinus Torvalds #include <linux/inet.h>
601da177e4SLinus Torvalds #include <linux/netdevice.h>
611da177e4SLinus Torvalds #include <linux/etherdevice.h>
621da177e4SLinus Torvalds #include <linux/proc_fs.h>
631da177e4SLinus Torvalds #include <linux/stat.h>
641da177e4SLinus Torvalds #include <linux/init.h>
651da177e4SLinus Torvalds 
661da177e4SLinus Torvalds #include <net/snmp.h>
671da177e4SLinus Torvalds #include <net/ip.h>
681da177e4SLinus Torvalds #include <net/protocol.h>
691da177e4SLinus Torvalds #include <net/route.h>
70cfacb057SPatrick McHardy #include <net/xfrm.h>
711da177e4SLinus Torvalds #include <linux/skbuff.h>
721da177e4SLinus Torvalds #include <net/sock.h>
731da177e4SLinus Torvalds #include <net/arp.h>
741da177e4SLinus Torvalds #include <net/icmp.h>
751da177e4SLinus Torvalds #include <net/checksum.h>
761da177e4SLinus Torvalds #include <net/inetpeer.h>
77ba9e04a7SWei Wang #include <net/inet_ecn.h>
7814972cbdSRoopa Prabhu #include <net/lwtunnel.h>
7933b48679SDaniel Mack #include <linux/bpf-cgroup.h>
801da177e4SLinus Torvalds #include <linux/igmp.h>
811da177e4SLinus Torvalds #include <linux/netfilter_ipv4.h>
821da177e4SLinus Torvalds #include <linux/netfilter_bridge.h>
831da177e4SLinus Torvalds #include <linux/netlink.h>
846cbb0df7SArnaldo Carvalho de Melo #include <linux/tcp.h>
851da177e4SLinus Torvalds 
86694869b3SEric W. Biederman static int
87694869b3SEric W. Biederman ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
88c5501eb3SFlorian Westphal 	    unsigned int mtu,
89694869b3SEric W. Biederman 	    int (*output)(struct net *, struct sock *, struct sk_buff *));
9049d16b23SAndy Zhou 
911da177e4SLinus Torvalds /* Generate a checksum for an outgoing IP datagram. */
922fbd9679SDenis Efremov void ip_send_check(struct iphdr *iph)
931da177e4SLinus Torvalds {
941da177e4SLinus Torvalds 	iph->check = 0;
951da177e4SLinus Torvalds 	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
961da177e4SLinus Torvalds }
974bc2f18bSEric Dumazet EXPORT_SYMBOL(ip_send_check);
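
/* For reference: ip_send_check() above stores the standard RFC 1071
 * one's-complement checksum of the IPv4 header in iph->check, computed over
 * iph->ihl 32-bit words with the check field zeroed first.  The block below
 * is a minimal, userspace-style sketch of that computation for illustration
 * only; it is not compiled as part of this file and the function name is
 * invented.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint16_t example_ip_header_csum(const void *hdr, unsigned int ihl)
{
	const uint16_t *p = hdr;	/* header is 16-bit aligned */
	uint32_t sum = 0;
	size_t i;

	/* ihl is in 32-bit words, i.e. ihl * 2 16-bit words */
	for (i = 0; i < ihl * 2; i++)
		sum += p[i];
	/* fold the carries back into the low 16 bits */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;		/* caller must have zeroed iph->check */
}
#endif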
981da177e4SLinus Torvalds 
99cf91a99dSEric W. Biederman int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
100c439cb2eSHerbert Xu {
101c439cb2eSHerbert Xu 	struct iphdr *iph = ip_hdr(skb);
102c439cb2eSHerbert Xu 
103b1a78b9bSXin Long 	iph_set_totlen(iph, skb->len);
104c439cb2eSHerbert Xu 	ip_send_check(iph);
105a8e3e1a9SDavid Ahern 
106a8e3e1a9SDavid Ahern 	/* If the egress device is enslaved to an L3 master device, pass the
107a8e3e1a9SDavid Ahern 	 * skb to its handler for processing
108a8e3e1a9SDavid Ahern 	 */
109a8e3e1a9SDavid Ahern 	skb = l3mdev_ip_out(sk, skb);
110a8e3e1a9SDavid Ahern 	if (unlikely(!skb))
111a8e3e1a9SDavid Ahern 		return 0;
112a8e3e1a9SDavid Ahern 
113f4180439SEli Cooper 	skb->protocol = htons(ETH_P_IP);
114f4180439SEli Cooper 
11529a26a56SEric W. Biederman 	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
11629a26a56SEric W. Biederman 		       net, sk, skb, NULL, skb_dst(skb)->dev,
11713206b6bSEric W. Biederman 		       dst_output);
1187026b1ddSDavid Miller }
1197026b1ddSDavid Miller 
12033224b16SEric W. Biederman int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
121c439cb2eSHerbert Xu {
122c439cb2eSHerbert Xu 	int err;
123c439cb2eSHerbert Xu 
124cf91a99dSEric W. Biederman 	err = __ip_local_out(net, sk, skb);
125c439cb2eSHerbert Xu 	if (likely(err == 1))
12613206b6bSEric W. Biederman 		err = dst_output(net, sk, skb);
127c439cb2eSHerbert Xu 
128c439cb2eSHerbert Xu 	return err;
129c439cb2eSHerbert Xu }
130e2cb77dbSEric W. Biederman EXPORT_SYMBOL_GPL(ip_local_out);
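
/* __ip_local_out() returns 1 when the NF_INET_LOCAL_OUT hook accepts the
 * packet, which is why ip_local_out() only continues into dst_output() in
 * that case.  The sketch below shows how a caller that has already attached
 * a route (skb_dst) and reserved headroom might build a bare IPv4 header and
 * hand the packet to ip_local_out(); it mirrors ip_build_and_send_pkt()
 * below, is illustrative only, and example_send_raw_ipv4() is an invented
 * name, not a helper used by this file.
 */
#if 0
static int example_send_raw_ipv4(struct net *net, struct sock *sk,
				 struct sk_buff *skb, __be32 saddr,
				 __be32 daddr, u8 proto, u8 tos, u8 ttl)
{
	struct iphdr *iph;

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version  = 4;
	iph->ihl      = 5;		/* no IP options */
	iph->tos      = tos;
	iph->frag_off = htons(IP_DF);	/* assumption: never fragment */
	iph->id       = 0;		/* acceptable while DF is set */
	iph->ttl      = ttl;
	iph->protocol = proto;
	iph->saddr    = saddr;
	iph->daddr    = daddr;
	/* tot_len and check are filled in by __ip_local_out() */

	return ip_local_out(net, sk, skb);
}
#endif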
131c439cb2eSHerbert Xu 
132abc17a11SEric Dumazet static inline int ip_select_ttl(const struct inet_sock *inet,
133abc17a11SEric Dumazet 				const struct dst_entry *dst)
1341da177e4SLinus Torvalds {
1351da177e4SLinus Torvalds 	int ttl = inet->uc_ttl;
1361da177e4SLinus Torvalds 
1371da177e4SLinus Torvalds 	if (ttl < 0)
138323e126fSDavid S. Miller 		ttl = ip4_dst_hoplimit(dst);
1391da177e4SLinus Torvalds 	return ttl;
1401da177e4SLinus Torvalds }
1411da177e4SLinus Torvalds 
1421da177e4SLinus Torvalds /*
1431da177e4SLinus Torvalds  *		Add an IP header to an skbuff and send it out.
1441da177e4SLinus Torvalds  *
1451da177e4SLinus Torvalds  */
146cfe673b0SEric Dumazet int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
147de033b7dSWei Wang 			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
148de033b7dSWei Wang 			  u8 tos)
1491da177e4SLinus Torvalds {
150abc17a11SEric Dumazet 	const struct inet_sock *inet = inet_sk(sk);
151511c3f92SEric Dumazet 	struct rtable *rt = skb_rtable(skb);
15277589ce0SEric W. Biederman 	struct net *net = sock_net(sk);
1531da177e4SLinus Torvalds 	struct iphdr *iph;
1541da177e4SLinus Torvalds 
1551da177e4SLinus Torvalds 	/* Build the IP header. */
156f6d8bd05SEric Dumazet 	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
1578856dfa3SArnaldo Carvalho de Melo 	skb_reset_network_header(skb);
158eddc9ec5SArnaldo Carvalho de Melo 	iph = ip_hdr(skb);
1591da177e4SLinus Torvalds 	iph->version  = 4;
1601da177e4SLinus Torvalds 	iph->ihl      = 5;
161de033b7dSWei Wang 	iph->tos      = tos;
162d8d1f30bSChangli Gao 	iph->ttl      = ip_select_ttl(inet, &rt->dst);
163dd927a26SDavid S. Miller 	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
164dd927a26SDavid S. Miller 	iph->saddr    = saddr;
1651da177e4SLinus Torvalds 	iph->protocol = sk->sk_protocol;
166970a5a3eSEric Dumazet 	/* Do not bother generating IPID for small packets (eg SYNACK) */
167970a5a3eSEric Dumazet 	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
168cfe673b0SEric Dumazet 		iph->frag_off = htons(IP_DF);
169cfe673b0SEric Dumazet 		iph->id = 0;
170cfe673b0SEric Dumazet 	} else {
171cfe673b0SEric Dumazet 		iph->frag_off = 0;
172970a5a3eSEric Dumazet 		/* TCP packets here are SYNACK with fat IPv4/TCP options.
173970a5a3eSEric Dumazet 		 * Avoid using the hashed IP ident generator.
174970a5a3eSEric Dumazet 		 */
175970a5a3eSEric Dumazet 		if (sk->sk_protocol == IPPROTO_TCP)
1767e3cf084SJason A. Donenfeld 			iph->id = (__force __be16)get_random_u16();
177970a5a3eSEric Dumazet 		else
17877589ce0SEric W. Biederman 			__ip_select_ident(net, iph, 1);
179cfe673b0SEric Dumazet 	}
1801da177e4SLinus Torvalds 
181f6d8bd05SEric Dumazet 	if (opt && opt->opt.optlen) {
182f6d8bd05SEric Dumazet 		iph->ihl += opt->opt.optlen>>2;
1834f0e3040SJakub Kicinski 		ip_options_build(skb, &opt->opt, daddr, rt);
1841da177e4SLinus Torvalds 	}
1851da177e4SLinus Torvalds 
1861da177e4SLinus Torvalds 	skb->priority = sk->sk_priority;
187e05a90ecSJamal Hadi Salim 	if (!skb->mark)
1884a19ec58SLaszlo Attila Toth 		skb->mark = sk->sk_mark;
1891da177e4SLinus Torvalds 
1901da177e4SLinus Torvalds 	/* Send it out. */
19133224b16SEric W. Biederman 	return ip_local_out(net, skb->sk, skb);
1921da177e4SLinus Torvalds }
193d8c97a94SArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
194d8c97a94SArnaldo Carvalho de Melo 
195694869b3SEric W. Biederman static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
1961da177e4SLinus Torvalds {
197adf30907SEric Dumazet 	struct dst_entry *dst = skb_dst(skb);
19880787ebcSMitsuru Chinen 	struct rtable *rt = (struct rtable *)dst;
1991da177e4SLinus Torvalds 	struct net_device *dev = dst->dev;
200c2636b4dSChuck Lever 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
201f6b72b62SDavid S. Miller 	struct neighbour *neigh;
2025c9f7c1dSDavid Ahern 	bool is_v6gw = false;
2031da177e4SLinus Torvalds 
204edf391ffSNeil Horman 	if (rt->rt_type == RTN_MULTICAST) {
2054ba1bf42SEric W. Biederman 		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
206edf391ffSNeil Horman 	} else if (rt->rt_type == RTN_BROADCAST)
2074ba1bf42SEric W. Biederman 		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
20880787ebcSMitsuru Chinen 
2093b04dddeSStephen Hemminger 	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
2105678a595SVasily Averin 		skb = skb_expand_head(skb, hh_len);
2115678a595SVasily Averin 		if (!skb)
2121da177e4SLinus Torvalds 			return -ENOMEM;
2131da177e4SLinus Torvalds 	}
2141da177e4SLinus Torvalds 
21514972cbdSRoopa Prabhu 	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
21614972cbdSRoopa Prabhu 		int res = lwtunnel_xmit(skb);
21714972cbdSRoopa Prabhu 
21814972cbdSRoopa Prabhu 		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
21914972cbdSRoopa Prabhu 			return res;
22014972cbdSRoopa Prabhu 	}
22114972cbdSRoopa Prabhu 
22209eed119SEric Dumazet 	rcu_read_lock();
2235c9f7c1dSDavid Ahern 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
2249871f1adSVasiliy Kulikov 	if (!IS_ERR(neigh)) {
2254ff06203SJulian Anastasov 		int res;
2264ff06203SJulian Anastasov 
2274ff06203SJulian Anastasov 		sock_confirm_neigh(skb, neigh);
2285c9f7c1dSDavid Ahern 		/* if crossing protocols, can not use the cached header */
2295c9f7c1dSDavid Ahern 		res = neigh_output(neigh, skb, is_v6gw);
23009eed119SEric Dumazet 		rcu_read_unlock();
231f2c31e32SEric Dumazet 		return res;
232f2c31e32SEric Dumazet 	}
23309eed119SEric Dumazet 	rcu_read_unlock();
23405e3aa09SDavid S. Miller 
235e87cc472SJoe Perches 	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
236e87cc472SJoe Perches 			    __func__);
2375e187189SMenglong Dong 	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
2381da177e4SLinus Torvalds 	return -EINVAL;
2391da177e4SLinus Torvalds }
2401da177e4SLinus Torvalds 
241694869b3SEric W. Biederman static int ip_finish_output_gso(struct net *net, struct sock *sk,
242694869b3SEric W. Biederman 				struct sk_buff *skb, unsigned int mtu)
243c7ba65d7SFlorian Westphal {
24488bebdf5SJason A. Donenfeld 	struct sk_buff *segs, *nskb;
245c7ba65d7SFlorian Westphal 	netdev_features_t features;
246c7ba65d7SFlorian Westphal 	int ret = 0;
247c7ba65d7SFlorian Westphal 
2489ee6c5dcSLance Richardson 	/* common case: seglen is <= mtu
249359ebda2SShmulik Ladkani 	 */
250779b7931SDaniel Axtens 	if (skb_gso_validate_network_len(skb, mtu))
251694869b3SEric W. Biederman 		return ip_finish_output2(net, sk, skb);
252c7ba65d7SFlorian Westphal 
2530ace81ecSLance Richardson 	/* Slowpath -  GSO segment length exceeds the egress MTU.
254c7ba65d7SFlorian Westphal 	 *
2550ace81ecSLance Richardson 	 * This can happen in several cases:
2560ace81ecSLance Richardson 	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
2570ace81ecSLance Richardson 	 *  - Forwarding of an skb that arrived on a virtualization interface
2580ace81ecSLance Richardson 	 *    (virtio-net/vhost/tap) with a TSO/GSO size set by another network
2590ace81ecSLance Richardson 	 *    stack.
2600ace81ecSLance Richardson 	 *  - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
2610ace81ecSLance Richardson 	 *    interface with a smaller MTU.
2620ace81ecSLance Richardson 	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
2630ace81ecSLance Richardson 	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
264a66e04ceSBhaskar Chowdhury 	 *    insufficient MTU.
265c7ba65d7SFlorian Westphal 	 */
266c7ba65d7SFlorian Westphal 	features = netif_skb_features(skb);
267a08e7fd9SCambda Zhu 	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
268c7ba65d7SFlorian Westphal 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
269330966e5SFlorian Westphal 	if (IS_ERR_OR_NULL(segs)) {
270c7ba65d7SFlorian Westphal 		kfree_skb(skb);
271c7ba65d7SFlorian Westphal 		return -ENOMEM;
272c7ba65d7SFlorian Westphal 	}
273c7ba65d7SFlorian Westphal 
274c7ba65d7SFlorian Westphal 	consume_skb(skb);
275c7ba65d7SFlorian Westphal 
27688bebdf5SJason A. Donenfeld 	skb_list_walk_safe(segs, segs, nskb) {
277c7ba65d7SFlorian Westphal 		int err;
278c7ba65d7SFlorian Westphal 
279a8305bffSDavid S. Miller 		skb_mark_not_on_list(segs);
280694869b3SEric W. Biederman 		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
281c7ba65d7SFlorian Westphal 
282c7ba65d7SFlorian Westphal 		if (err && ret == 0)
283c7ba65d7SFlorian Westphal 			ret = err;
28488bebdf5SJason A. Donenfeld 	}
285c7ba65d7SFlorian Westphal 
286c7ba65d7SFlorian Westphal 	return ret;
287c7ba65d7SFlorian Westphal }
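
/* The fast path above relies on skb_gso_validate_network_len(): a GSO skb
 * can be handed straight to ip_finish_output2() when each resegmented
 * packet, i.e. the IP and TCP/UDP headers plus one gso_size worth of
 * payload, still fits the egress MTU.  The snippet below is only a rough,
 * standalone model of that test (invented name, not the kernel helper).
 */
#if 0
#include <stdbool.h>

static bool example_gso_segments_fit(unsigned int net_hdr_len,	 /* IP hdr incl. options */
				     unsigned int trans_hdr_len, /* TCP/UDP header */
				     unsigned int gso_size,	 /* payload per segment */
				     unsigned int mtu)
{
	return net_hdr_len + trans_hdr_len + gso_size <= mtu;
}
#endif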
288c7ba65d7SFlorian Westphal 
289956fe219Sbrakmo static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2901da177e4SLinus Torvalds {
291c5501eb3SFlorian Westphal 	unsigned int mtu;
292c5501eb3SFlorian Westphal 
2935c901daaSPatrick McHardy #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
2945c901daaSPatrick McHardy 	/* Policy lookup after SNAT yielded a new policy */
29500db4124SIan Morris 	if (skb_dst(skb)->xfrm) {
29648d5cad8SPatrick McHardy 		IPCB(skb)->flags |= IPSKB_REROUTED;
29713206b6bSEric W. Biederman 		return dst_output(net, sk, skb);
29848d5cad8SPatrick McHardy 	}
2995c901daaSPatrick McHardy #endif
300fedbb6b4SShmulik Ladkani 	mtu = ip_skb_dst_mtu(sk, skb);
301c7ba65d7SFlorian Westphal 	if (skb_is_gso(skb))
302694869b3SEric W. Biederman 		return ip_finish_output_gso(net, sk, skb, mtu);
303c7ba65d7SFlorian Westphal 
304bb4cc1a1SFlorian Westphal 	if (skb->len > mtu || IPCB(skb)->frag_max_size)
305694869b3SEric W. Biederman 		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
306c7ba65d7SFlorian Westphal 
307694869b3SEric W. Biederman 	return ip_finish_output2(net, sk, skb);
3081da177e4SLinus Torvalds }
3091da177e4SLinus Torvalds 
310956fe219Sbrakmo static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
311956fe219Sbrakmo {
312956fe219Sbrakmo 	int ret;
313956fe219Sbrakmo 
314956fe219Sbrakmo 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
315956fe219Sbrakmo 	switch (ret) {
316956fe219Sbrakmo 	case NET_XMIT_SUCCESS:
317956fe219Sbrakmo 		return __ip_finish_output(net, sk, skb);
318956fe219Sbrakmo 	case NET_XMIT_CN:
319956fe219Sbrakmo 		return __ip_finish_output(net, sk, skb) ? : ret;
320956fe219Sbrakmo 	default:
3215e187189SMenglong Dong 		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
322956fe219Sbrakmo 		return ret;
323956fe219Sbrakmo 	}
324956fe219Sbrakmo }
325956fe219Sbrakmo 
32633b48679SDaniel Mack static int ip_mc_finish_output(struct net *net, struct sock *sk,
32733b48679SDaniel Mack 			       struct sk_buff *skb)
32833b48679SDaniel Mack {
3295b18f128SStephen Suryaputra 	struct rtable *new_rt;
330d96ff269SDavid S. Miller 	bool do_cn = false;
331d96ff269SDavid S. Miller 	int ret, err;
33233b48679SDaniel Mack 
33333b48679SDaniel Mack 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
334956fe219Sbrakmo 	switch (ret) {
335956fe219Sbrakmo 	case NET_XMIT_CN:
336d96ff269SDavid S. Miller 		do_cn = true;
337a8eceea8SJoe Perches 		fallthrough;
338d96ff269SDavid S. Miller 	case NET_XMIT_SUCCESS:
339d96ff269SDavid S. Miller 		break;
340956fe219Sbrakmo 	default:
3415e187189SMenglong Dong 		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
34233b48679SDaniel Mack 		return ret;
34333b48679SDaniel Mack 	}
34433b48679SDaniel Mack 
3455b18f128SStephen Suryaputra 	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
3465b18f128SStephen Suryaputra 	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
3475b18f128SStephen Suryaputra 	 * see ipv4_pktinfo_prepare().
3485b18f128SStephen Suryaputra 	 */
3495b18f128SStephen Suryaputra 	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
3505b18f128SStephen Suryaputra 	if (new_rt) {
3515b18f128SStephen Suryaputra 		new_rt->rt_iif = 0;
3525b18f128SStephen Suryaputra 		skb_dst_drop(skb);
3535b18f128SStephen Suryaputra 		skb_dst_set(skb, &new_rt->dst);
3545b18f128SStephen Suryaputra 	}
3555b18f128SStephen Suryaputra 
356d96ff269SDavid S. Miller 	err = dev_loopback_xmit(net, sk, skb);
357d96ff269SDavid S. Miller 	return (do_cn && err) ? ret : err;
35833b48679SDaniel Mack }
35933b48679SDaniel Mack 
360ede2059dSEric W. Biederman int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
3611da177e4SLinus Torvalds {
362511c3f92SEric Dumazet 	struct rtable *rt = skb_rtable(skb);
363d8d1f30bSChangli Gao 	struct net_device *dev = rt->dst.dev;
3641da177e4SLinus Torvalds 
3651da177e4SLinus Torvalds 	/*
3661da177e4SLinus Torvalds 	 *	If the indicated interface is up and running, send the packet.
3671da177e4SLinus Torvalds 	 */
36888f5cc24SEric W. Biederman 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
3691da177e4SLinus Torvalds 
3701da177e4SLinus Torvalds 	skb->dev = dev;
3711da177e4SLinus Torvalds 	skb->protocol = htons(ETH_P_IP);
3721da177e4SLinus Torvalds 
3731da177e4SLinus Torvalds 	/*
3741da177e4SLinus Torvalds 	 *	Multicasts are looped back for other local users
3751da177e4SLinus Torvalds 	 */
3761da177e4SLinus Torvalds 
3771da177e4SLinus Torvalds 	if (rt->rt_flags&RTCF_MULTICAST) {
3787ad6848cSOctavian Purdila 		if (sk_mc_loop(sk)
3791da177e4SLinus Torvalds #ifdef CONFIG_IP_MROUTE
3801da177e4SLinus Torvalds 		/* Small optimization: do not loop back non-local frames
3811da177e4SLinus Torvalds 		   that came back after forwarding; ip_mr_input will drop
3821da177e4SLinus Torvalds 		   them in any case.
3831da177e4SLinus Torvalds 		   Note that local frames are looped back so that they are
3841da177e4SLinus Torvalds 		   delivered to local recipients.
3851da177e4SLinus Torvalds 
3861da177e4SLinus Torvalds 		   This check is currently duplicated in ip_mr_input.
3871da177e4SLinus Torvalds 		 */
3889d4fb27dSJoe Perches 		    &&
3899d4fb27dSJoe Perches 		    ((rt->rt_flags & RTCF_LOCAL) ||
3909d4fb27dSJoe Perches 		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
3911da177e4SLinus Torvalds #endif
3921da177e4SLinus Torvalds 		   ) {
3931da177e4SLinus Torvalds 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3941da177e4SLinus Torvalds 			if (newskb)
3959bbc768aSJan Engelhardt 				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
39629a26a56SEric W. Biederman 					net, sk, newskb, NULL, newskb->dev,
39733b48679SDaniel Mack 					ip_mc_finish_output);
3981da177e4SLinus Torvalds 		}
3991da177e4SLinus Torvalds 
4001da177e4SLinus Torvalds 		/* Multicasts with ttl 0 must not go beyond the host */
4011da177e4SLinus Torvalds 
402eddc9ec5SArnaldo Carvalho de Melo 		if (ip_hdr(skb)->ttl == 0) {
4031da177e4SLinus Torvalds 			kfree_skb(skb);
4041da177e4SLinus Torvalds 			return 0;
4051da177e4SLinus Torvalds 		}
4061da177e4SLinus Torvalds 	}
4071da177e4SLinus Torvalds 
4081da177e4SLinus Torvalds 	if (rt->rt_flags&RTCF_BROADCAST) {
4091da177e4SLinus Torvalds 		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
4101da177e4SLinus Torvalds 		if (newskb)
41129a26a56SEric W. Biederman 			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
41229a26a56SEric W. Biederman 				net, sk, newskb, NULL, newskb->dev,
41333b48679SDaniel Mack 				ip_mc_finish_output);
4141da177e4SLinus Torvalds 	}
4151da177e4SLinus Torvalds 
41629a26a56SEric W. Biederman 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
41729a26a56SEric W. Biederman 			    net, sk, skb, NULL, skb->dev,
41829a26a56SEric W. Biederman 			    ip_finish_output,
41948d5cad8SPatrick McHardy 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
4201da177e4SLinus Torvalds }
4211da177e4SLinus Torvalds 
422ede2059dSEric W. Biederman int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
4231da177e4SLinus Torvalds {
42428f8bfd1SPhil Sutter 	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
4251bd9bef6SPatrick McHardy 
42688f5cc24SEric W. Biederman 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
4271da177e4SLinus Torvalds 
4281bd9bef6SPatrick McHardy 	skb->dev = dev;
4291bd9bef6SPatrick McHardy 	skb->protocol = htons(ETH_P_IP);
4301bd9bef6SPatrick McHardy 
43129a26a56SEric W. Biederman 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
43228f8bfd1SPhil Sutter 			    net, sk, skb, indev, dev,
43348d5cad8SPatrick McHardy 			    ip_finish_output,
43448d5cad8SPatrick McHardy 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
4351da177e4SLinus Torvalds }
4366585d7dcSBrian Vazquez EXPORT_SYMBOL(ip_output);
4371da177e4SLinus Torvalds 
43884f9307cSEric Dumazet /*
43984f9307cSEric Dumazet  * copy saddr and daddr, possibly using 64bit load/stores
44084f9307cSEric Dumazet  * Equivalent to :
44184f9307cSEric Dumazet  *   iph->saddr = fl4->saddr;
44284f9307cSEric Dumazet  *   iph->daddr = fl4->daddr;
44384f9307cSEric Dumazet  */
44484f9307cSEric Dumazet static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
44584f9307cSEric Dumazet {
44684f9307cSEric Dumazet 	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
44784f9307cSEric Dumazet 		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
4486321c7acSGustavo A. R. Silva 
4496321c7acSGustavo A. R. Silva 	iph->saddr = fl4->saddr;
4506321c7acSGustavo A. R. Silva 	iph->daddr = fl4->daddr;
45184f9307cSEric Dumazet }
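
/* The BUILD_BUG_ON() above only documents why the two assignments may be
 * merged: because saddr and daddr are adjacent in struct flowi4 (as in the
 * IP header), the compiler is free to combine the two 32-bit stores into a
 * single 64-bit one.  Below is a small userspace-style illustration of the
 * same idea with an invented struct; it is not compiled here.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct example_addrs { uint32_t saddr, daddr; };	/* adjacent fields */

static void example_copy_addrs(struct example_addrs *dst,
			       const struct example_addrs *src)
{
	static_assert(offsetof(struct example_addrs, daddr) ==
		      offsetof(struct example_addrs, saddr) + sizeof(uint32_t),
		      "saddr/daddr must stay adjacent");
	/* one 8-byte copy instead of two 4-byte stores */
	memcpy(dst, src, 2 * sizeof(uint32_t));
}
#endif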
45284f9307cSEric Dumazet 
453b0270e91SEric Dumazet /* Note: skb->sk can be different from sk, in case of tunnels */
45469b9e1e0SXin Long int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
45569b9e1e0SXin Long 		    __u8 tos)
4561da177e4SLinus Torvalds {
4571da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
45877589ce0SEric W. Biederman 	struct net *net = sock_net(sk);
459f6d8bd05SEric Dumazet 	struct ip_options_rcu *inet_opt;
460b57ae01aSDavid S. Miller 	struct flowi4 *fl4;
4611da177e4SLinus Torvalds 	struct rtable *rt;
4621da177e4SLinus Torvalds 	struct iphdr *iph;
463ab6e3febSEric Dumazet 	int res;
4641da177e4SLinus Torvalds 
4651da177e4SLinus Torvalds 	/* Skip all of this if the packet is already routed,
4661da177e4SLinus Torvalds 	 * e.g. by something like SCTP.
4671da177e4SLinus Torvalds 	 */
468ab6e3febSEric Dumazet 	rcu_read_lock();
469f6d8bd05SEric Dumazet 	inet_opt = rcu_dereference(inet->inet_opt);
470ea4fc0d6SDavid S. Miller 	fl4 = &fl->u.ip4;
471511c3f92SEric Dumazet 	rt = skb_rtable(skb);
47200db4124SIan Morris 	if (rt)
4731da177e4SLinus Torvalds 		goto packet_routed;
4741da177e4SLinus Torvalds 
4751da177e4SLinus Torvalds 	/* Make sure we can route this packet. */
4761da177e4SLinus Torvalds 	rt = (struct rtable *)__sk_dst_check(sk, 0);
47751456b29SIan Morris 	if (!rt) {
4783ca3c68eSAl Viro 		__be32 daddr;
4791da177e4SLinus Torvalds 
4801da177e4SLinus Torvalds 		/* Use correct destination address if we have options. */
481c720c7e8SEric Dumazet 		daddr = inet->inet_daddr;
482f6d8bd05SEric Dumazet 		if (inet_opt && inet_opt->opt.srr)
483f6d8bd05SEric Dumazet 			daddr = inet_opt->opt.faddr;
4841da177e4SLinus Torvalds 
4851da177e4SLinus Torvalds 		/* If this fails, the transport layer's retransmit mechanism
4861da177e4SLinus Torvalds 		 * will keep trying until a route appears or the connection
4871da177e4SLinus Torvalds 		 * times out.
4881da177e4SLinus Torvalds 		 */
48977589ce0SEric W. Biederman 		rt = ip_route_output_ports(net, fl4, sk,
49078fbfd8aSDavid S. Miller 					   daddr, inet->inet_saddr,
49178fbfd8aSDavid S. Miller 					   inet->inet_dport,
49278fbfd8aSDavid S. Miller 					   inet->inet_sport,
49378fbfd8aSDavid S. Miller 					   sk->sk_protocol,
49469b9e1e0SXin Long 					   RT_CONN_FLAGS_TOS(sk, tos),
49578fbfd8aSDavid S. Miller 					   sk->sk_bound_dev_if);
496b23dd4feSDavid S. Miller 		if (IS_ERR(rt))
4971da177e4SLinus Torvalds 			goto no_route;
498d8d1f30bSChangli Gao 		sk_setup_caps(sk, &rt->dst);
4991da177e4SLinus Torvalds 	}
500d8d1f30bSChangli Gao 	skb_dst_set_noref(skb, &rt->dst);
5011da177e4SLinus Torvalds 
5021da177e4SLinus Torvalds packet_routed:
50377d5bc7eSDavid Ahern 	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
5041da177e4SLinus Torvalds 		goto no_route;
5051da177e4SLinus Torvalds 
5061da177e4SLinus Torvalds 	/* OK, we know where to send it, allocate and build IP header. */
507f6d8bd05SEric Dumazet 	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
5088856dfa3SArnaldo Carvalho de Melo 	skb_reset_network_header(skb);
509eddc9ec5SArnaldo Carvalho de Melo 	iph = ip_hdr(skb);
51069b9e1e0SXin Long 	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
51160ff7467SWANG Cong 	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
5121da177e4SLinus Torvalds 		iph->frag_off = htons(IP_DF);
5131da177e4SLinus Torvalds 	else
5141da177e4SLinus Torvalds 		iph->frag_off = 0;
515d8d1f30bSChangli Gao 	iph->ttl      = ip_select_ttl(inet, &rt->dst);
5161da177e4SLinus Torvalds 	iph->protocol = sk->sk_protocol;
51784f9307cSEric Dumazet 	ip_copy_addrs(iph, fl4);
51884f9307cSEric Dumazet 
5191da177e4SLinus Torvalds 	/* Transport layer set skb->h.foo itself. */
5201da177e4SLinus Torvalds 
521f6d8bd05SEric Dumazet 	if (inet_opt && inet_opt->opt.optlen) {
522f6d8bd05SEric Dumazet 		iph->ihl += inet_opt->opt.optlen >> 2;
5234f0e3040SJakub Kicinski 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
5241da177e4SLinus Torvalds 	}
5251da177e4SLinus Torvalds 
52677589ce0SEric W. Biederman 	ip_select_ident_segs(net, skb, sk,
527b6a7719aSHannes Frederic Sowa 			     skb_shinfo(skb)->gso_segs ?: 1);
5281da177e4SLinus Torvalds 
529b0270e91SEric Dumazet 	/* TODO : should we use skb->sk here instead of sk ? */
5301da177e4SLinus Torvalds 	skb->priority = sk->sk_priority;
5314a19ec58SLaszlo Attila Toth 	skb->mark = sk->sk_mark;
5321da177e4SLinus Torvalds 
53333224b16SEric W. Biederman 	res = ip_local_out(net, sk, skb);
534ab6e3febSEric Dumazet 	rcu_read_unlock();
535ab6e3febSEric Dumazet 	return res;
5361da177e4SLinus Torvalds 
5371da177e4SLinus Torvalds no_route:
538ab6e3febSEric Dumazet 	rcu_read_unlock();
53977589ce0SEric W. Biederman 	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
5405e187189SMenglong Dong 	kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
5411da177e4SLinus Torvalds 	return -EHOSTUNREACH;
5421da177e4SLinus Torvalds }
54369b9e1e0SXin Long EXPORT_SYMBOL(__ip_queue_xmit);
5441da177e4SLinus Torvalds 
54505e22e83SEric Dumazet int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
54605e22e83SEric Dumazet {
54705e22e83SEric Dumazet 	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
54805e22e83SEric Dumazet }
54905e22e83SEric Dumazet EXPORT_SYMBOL(ip_queue_xmit);
55005e22e83SEric Dumazet 
5511da177e4SLinus Torvalds static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
5521da177e4SLinus Torvalds {
5531da177e4SLinus Torvalds 	to->pkt_type = from->pkt_type;
5541da177e4SLinus Torvalds 	to->priority = from->priority;
5551da177e4SLinus Torvalds 	to->protocol = from->protocol;
556d2f0c961SShmulik Ladkani 	to->skb_iif = from->skb_iif;
557adf30907SEric Dumazet 	skb_dst_drop(to);
558fe76cda3SEric Dumazet 	skb_dst_copy(to, from);
5591da177e4SLinus Torvalds 	to->dev = from->dev;
56082e91ffeSThomas Graf 	to->mark = from->mark;
5611da177e4SLinus Torvalds 
5623dd1c9a1SPaolo Abeni 	skb_copy_hash(to, from);
5633dd1c9a1SPaolo Abeni 
5641da177e4SLinus Torvalds #ifdef CONFIG_NET_SCHED
5651da177e4SLinus Torvalds 	to->tc_index = from->tc_index;
5661da177e4SLinus Torvalds #endif
567e7ac05f3SYasuyuki Kozakai 	nf_copy(to, from);
568df5042f4SFlorian Westphal 	skb_ext_copy(to, from);
5696ca40d4eSJavier Martinez Canillas #if IS_ENABLED(CONFIG_IP_VS)
570c98d80edSJulian Anastasov 	to->ipvs_property = from->ipvs_property;
571c98d80edSJulian Anastasov #endif
572984bc16cSJames Morris 	skb_copy_secmark(to, from);
5731da177e4SLinus Torvalds }
5741da177e4SLinus Torvalds 
575694869b3SEric W. Biederman static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
576c5501eb3SFlorian Westphal 		       unsigned int mtu,
577694869b3SEric W. Biederman 		       int (*output)(struct net *, struct sock *, struct sk_buff *))
57849d16b23SAndy Zhou {
57949d16b23SAndy Zhou 	struct iphdr *iph = ip_hdr(skb);
58049d16b23SAndy Zhou 
581d6b915e2SFlorian Westphal 	if ((iph->frag_off & htons(IP_DF)) == 0)
582694869b3SEric W. Biederman 		return ip_do_fragment(net, sk, skb, output);
583d6b915e2SFlorian Westphal 
584d6b915e2SFlorian Westphal 	if (unlikely(!skb->ignore_df ||
58549d16b23SAndy Zhou 		     (IPCB(skb)->frag_max_size &&
58649d16b23SAndy Zhou 		      IPCB(skb)->frag_max_size > mtu))) {
5879479b0afSEric W. Biederman 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
58849d16b23SAndy Zhou 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
58949d16b23SAndy Zhou 			  htonl(mtu));
59049d16b23SAndy Zhou 		kfree_skb(skb);
59149d16b23SAndy Zhou 		return -EMSGSIZE;
59249d16b23SAndy Zhou 	}
59349d16b23SAndy Zhou 
594694869b3SEric W. Biederman 	return ip_do_fragment(net, sk, skb, output);
59549d16b23SAndy Zhou }
59649d16b23SAndy Zhou 
597c8b17be0SPablo Neira Ayuso void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
598c8b17be0SPablo Neira Ayuso 		      unsigned int hlen, struct ip_fraglist_iter *iter)
599c8b17be0SPablo Neira Ayuso {
600c8b17be0SPablo Neira Ayuso 	unsigned int first_len = skb_pagelen(skb);
601c8b17be0SPablo Neira Ayuso 
602b7034146SEric Dumazet 	iter->frag = skb_shinfo(skb)->frag_list;
603c8b17be0SPablo Neira Ayuso 	skb_frag_list_init(skb);
604c8b17be0SPablo Neira Ayuso 
605c8b17be0SPablo Neira Ayuso 	iter->offset = 0;
606c8b17be0SPablo Neira Ayuso 	iter->iph = iph;
607c8b17be0SPablo Neira Ayuso 	iter->hlen = hlen;
608c8b17be0SPablo Neira Ayuso 
609c8b17be0SPablo Neira Ayuso 	skb->data_len = first_len - skb_headlen(skb);
610c8b17be0SPablo Neira Ayuso 	skb->len = first_len;
611c8b17be0SPablo Neira Ayuso 	iph->tot_len = htons(first_len);
612c8b17be0SPablo Neira Ayuso 	iph->frag_off = htons(IP_MF);
613c8b17be0SPablo Neira Ayuso 	ip_send_check(iph);
614c8b17be0SPablo Neira Ayuso }
615c8b17be0SPablo Neira Ayuso EXPORT_SYMBOL(ip_fraglist_init);
616c8b17be0SPablo Neira Ayuso 
617c8b17be0SPablo Neira Ayuso void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
618c8b17be0SPablo Neira Ayuso {
619c8b17be0SPablo Neira Ayuso 	unsigned int hlen = iter->hlen;
620c8b17be0SPablo Neira Ayuso 	struct iphdr *iph = iter->iph;
621c8b17be0SPablo Neira Ayuso 	struct sk_buff *frag;
622c8b17be0SPablo Neira Ayuso 
623c8b17be0SPablo Neira Ayuso 	frag = iter->frag;
624c8b17be0SPablo Neira Ayuso 	frag->ip_summed = CHECKSUM_NONE;
625c8b17be0SPablo Neira Ayuso 	skb_reset_transport_header(frag);
626c8b17be0SPablo Neira Ayuso 	__skb_push(frag, hlen);
627c8b17be0SPablo Neira Ayuso 	skb_reset_network_header(frag);
628c8b17be0SPablo Neira Ayuso 	memcpy(skb_network_header(frag), iph, hlen);
629c8b17be0SPablo Neira Ayuso 	iter->iph = ip_hdr(frag);
630c8b17be0SPablo Neira Ayuso 	iph = iter->iph;
631c8b17be0SPablo Neira Ayuso 	iph->tot_len = htons(frag->len);
632c8b17be0SPablo Neira Ayuso 	ip_copy_metadata(frag, skb);
633c8b17be0SPablo Neira Ayuso 	iter->offset += skb->len - hlen;
634c8b17be0SPablo Neira Ayuso 	iph->frag_off = htons(iter->offset >> 3);
635c8b17be0SPablo Neira Ayuso 	if (frag->next)
636c8b17be0SPablo Neira Ayuso 		iph->frag_off |= htons(IP_MF);
637c8b17be0SPablo Neira Ayuso 	/* Ready, complete checksum */
638c8b17be0SPablo Neira Ayuso 	ip_send_check(iph);
639c8b17be0SPablo Neira Ayuso }
640c8b17be0SPablo Neira Ayuso EXPORT_SYMBOL(ip_fraglist_prepare);
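
/* ip_fraglist_init() and ip_fraglist_prepare() implement the frag_list fast
 * path: the head skb becomes fragment zero and each queued frag is patched
 * with a copy of the IP header plus its offset/MF bits.  The sketch below
 * condenses the way ip_do_fragment() drives them further down in this file;
 * it is illustrative only and example_fraglist_walk() is an invented name.
 */
#if 0
static int example_fraglist_walk(struct net *net, struct sock *sk,
				 struct sk_buff *skb, struct iphdr *iph,
				 unsigned int hlen,
				 int (*output)(struct net *, struct sock *,
					       struct sk_buff *))
{
	struct ip_fraglist_iter iter;
	int err;

	ip_fraglist_init(skb, iph, hlen, &iter);	/* head is fragment 0 */
	for (;;) {
		if (iter.frag)		/* write the next fragment's header */
			ip_fraglist_prepare(skb, &iter);
		err = output(net, sk, skb);
		if (err || !iter.frag)
			break;
		skb = ip_fraglist_next(&iter);	/* advance to that fragment */
	}
	return err;
}
#endif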
641c8b17be0SPablo Neira Ayuso 
642065ff79fSPablo Neira Ayuso void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
643e7a409c3SEric Dumazet 		  unsigned int ll_rs, unsigned int mtu, bool DF,
644065ff79fSPablo Neira Ayuso 		  struct ip_frag_state *state)
645065ff79fSPablo Neira Ayuso {
646065ff79fSPablo Neira Ayuso 	struct iphdr *iph = ip_hdr(skb);
647065ff79fSPablo Neira Ayuso 
648e7a409c3SEric Dumazet 	state->DF = DF;
649065ff79fSPablo Neira Ayuso 	state->hlen = hlen;
650065ff79fSPablo Neira Ayuso 	state->ll_rs = ll_rs;
651065ff79fSPablo Neira Ayuso 	state->mtu = mtu;
652065ff79fSPablo Neira Ayuso 
653065ff79fSPablo Neira Ayuso 	state->left = skb->len - hlen;	/* Space per frame */
654065ff79fSPablo Neira Ayuso 	state->ptr = hlen;		/* Where to start from */
655065ff79fSPablo Neira Ayuso 
656065ff79fSPablo Neira Ayuso 	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
657065ff79fSPablo Neira Ayuso 	state->not_last_frag = iph->frag_off & htons(IP_MF);
658065ff79fSPablo Neira Ayuso }
659065ff79fSPablo Neira Ayuso EXPORT_SYMBOL(ip_frag_init);
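
/* Worked example (assumed numbers) of the geometry ip_frag_init() and
 * ip_frag_next() produce: offsets are carried in 8-byte units and only the
 * final fragment may have a payload that is not a multiple of 8.  The helper
 * below just prints such a plan; it is a standalone, userspace-style sketch
 * with an invented name, not code used by the kernel.
 */
#if 0
#include <stdio.h>

/* e.g. example_print_frag_plan(20, 4000, 1500) prints three fragments with
 * frag_off 0, 185 and 370 (i.e. byte offsets 0, 1480 and 2960).
 */
static void example_print_frag_plan(unsigned int hlen, unsigned int payload,
				    unsigned int mtu)
{
	unsigned int space = mtu - hlen;	/* data beside the header; assume >= 8 */
	unsigned int offset = 0;

	while (payload > 0) {
		unsigned int len = payload;

		if (len > space)
			len = space;
		if (len < payload)		/* not last: keep 8-byte alignment */
			len &= ~7U;
		printf("frag_off=%u len=%u MF=%u\n",
		       offset >> 3, len + hlen, len < payload);
		offset += len;
		payload -= len;
	}
}
#endif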
660065ff79fSPablo Neira Ayuso 
66119c3401aSPablo Neira Ayuso static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
662faf482caSYajun Deng 			 bool first_frag)
66319c3401aSPablo Neira Ayuso {
66419c3401aSPablo Neira Ayuso 	/* Copy the flags to each fragment. */
66519c3401aSPablo Neira Ayuso 	IPCB(to)->flags = IPCB(from)->flags;
66619c3401aSPablo Neira Ayuso 
66719c3401aSPablo Neira Ayuso 	/* ANK: dirty, but effective trick. Upgrade options only if
66819c3401aSPablo Neira Ayuso 	 * the segment to be fragmented was THE FIRST (otherwise,
66919c3401aSPablo Neira Ayuso 	 * options are already fixed) and make it ONCE
67019c3401aSPablo Neira Ayuso 	 * on the initial skb, so that all the following fragments
67119c3401aSPablo Neira Ayuso 	 * will inherit fixed options.
67219c3401aSPablo Neira Ayuso 	 */
67319c3401aSPablo Neira Ayuso 	if (first_frag)
67419c3401aSPablo Neira Ayuso 		ip_options_fragment(from);
67519c3401aSPablo Neira Ayuso }
67619c3401aSPablo Neira Ayuso 
677065ff79fSPablo Neira Ayuso struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
678065ff79fSPablo Neira Ayuso {
679065ff79fSPablo Neira Ayuso 	unsigned int len = state->left;
680065ff79fSPablo Neira Ayuso 	struct sk_buff *skb2;
681065ff79fSPablo Neira Ayuso 	struct iphdr *iph;
682065ff79fSPablo Neira Ayuso 
683065ff79fSPablo Neira Ayuso 	/* IF: it doesn't fit, use 'mtu' - the data space left */
684065ff79fSPablo Neira Ayuso 	if (len > state->mtu)
685065ff79fSPablo Neira Ayuso 		len = state->mtu;
686065ff79fSPablo Neira Ayuso 	/* IF: we are not sending up to and including the packet end
687065ff79fSPablo Neira Ayuso 	   then align the next start on an eight byte boundary */
688065ff79fSPablo Neira Ayuso 	if (len < state->left)	{
689065ff79fSPablo Neira Ayuso 		len &= ~7;
690065ff79fSPablo Neira Ayuso 	}
691065ff79fSPablo Neira Ayuso 
692065ff79fSPablo Neira Ayuso 	/* Allocate buffer */
693065ff79fSPablo Neira Ayuso 	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
694065ff79fSPablo Neira Ayuso 	if (!skb2)
695065ff79fSPablo Neira Ayuso 		return ERR_PTR(-ENOMEM);
696065ff79fSPablo Neira Ayuso 
697065ff79fSPablo Neira Ayuso 	/*
698065ff79fSPablo Neira Ayuso 	 *	Set up data on packet
699065ff79fSPablo Neira Ayuso 	 */
700065ff79fSPablo Neira Ayuso 
701065ff79fSPablo Neira Ayuso 	ip_copy_metadata(skb2, skb);
702065ff79fSPablo Neira Ayuso 	skb_reserve(skb2, state->ll_rs);
703065ff79fSPablo Neira Ayuso 	skb_put(skb2, len + state->hlen);
704065ff79fSPablo Neira Ayuso 	skb_reset_network_header(skb2);
705065ff79fSPablo Neira Ayuso 	skb2->transport_header = skb2->network_header + state->hlen;
706065ff79fSPablo Neira Ayuso 
707065ff79fSPablo Neira Ayuso 	/*
708065ff79fSPablo Neira Ayuso 	 *	Charge the memory for the fragment to any owner
709065ff79fSPablo Neira Ayuso 	 *	it might possess
710065ff79fSPablo Neira Ayuso 	 */
711065ff79fSPablo Neira Ayuso 
712065ff79fSPablo Neira Ayuso 	if (skb->sk)
713065ff79fSPablo Neira Ayuso 		skb_set_owner_w(skb2, skb->sk);
714065ff79fSPablo Neira Ayuso 
715065ff79fSPablo Neira Ayuso 	/*
716065ff79fSPablo Neira Ayuso 	 *	Copy the packet header into the new buffer.
717065ff79fSPablo Neira Ayuso 	 */
718065ff79fSPablo Neira Ayuso 
719065ff79fSPablo Neira Ayuso 	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
720065ff79fSPablo Neira Ayuso 
721065ff79fSPablo Neira Ayuso 	/*
722065ff79fSPablo Neira Ayuso 	 *	Copy a block of the IP datagram.
723065ff79fSPablo Neira Ayuso 	 */
724065ff79fSPablo Neira Ayuso 	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
725065ff79fSPablo Neira Ayuso 		BUG();
726065ff79fSPablo Neira Ayuso 	state->left -= len;
727065ff79fSPablo Neira Ayuso 
728065ff79fSPablo Neira Ayuso 	/*
729065ff79fSPablo Neira Ayuso 	 *	Fill in the new header fields.
730065ff79fSPablo Neira Ayuso 	 */
731065ff79fSPablo Neira Ayuso 	iph = ip_hdr(skb2);
732065ff79fSPablo Neira Ayuso 	iph->frag_off = htons((state->offset >> 3));
733e7a409c3SEric Dumazet 	if (state->DF)
734e7a409c3SEric Dumazet 		iph->frag_off |= htons(IP_DF);
735065ff79fSPablo Neira Ayuso 
736065ff79fSPablo Neira Ayuso 	/*
737065ff79fSPablo Neira Ayuso 	 *	Added AC : If we are fragmenting a fragment that's not the
738065ff79fSPablo Neira Ayuso 	 *		   last fragment then keep MF on each bit
739065ff79fSPablo Neira Ayuso 	 */
740065ff79fSPablo Neira Ayuso 	if (state->left > 0 || state->not_last_frag)
741065ff79fSPablo Neira Ayuso 		iph->frag_off |= htons(IP_MF);
742065ff79fSPablo Neira Ayuso 	state->ptr += len;
743065ff79fSPablo Neira Ayuso 	state->offset += len;
744065ff79fSPablo Neira Ayuso 
745065ff79fSPablo Neira Ayuso 	iph->tot_len = htons(len + state->hlen);
746065ff79fSPablo Neira Ayuso 
747065ff79fSPablo Neira Ayuso 	ip_send_check(iph);
748065ff79fSPablo Neira Ayuso 
749065ff79fSPablo Neira Ayuso 	return skb2;
750065ff79fSPablo Neira Ayuso }
751065ff79fSPablo Neira Ayuso EXPORT_SYMBOL(ip_frag_next);
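
/* The slow path in ip_do_fragment() below uses the two helpers above in a
 * simple init/next loop.  The distilled sketch here shows that calling
 * pattern with the error handling trimmed to the essentials; it is
 * illustrative only and example_slow_fragment() is an invented name.  Note
 * that 'mtu' passed to ip_frag_init() is the per-fragment data space, i.e.
 * the link MTU minus the IP header length.
 */
#if 0
static int example_slow_fragment(struct net *net, struct sock *sk,
				 struct sk_buff *skb, unsigned int hlen,
				 unsigned int ll_rs, unsigned int mtu,
				 int (*output)(struct net *, struct sock *,
					       struct sk_buff *))
{
	struct ip_frag_state state;
	struct sk_buff *skb2;
	int err;

	ip_frag_init(skb, hlen, ll_rs, mtu, false /* DF */, &state);

	while (state.left > 0) {
		skb2 = ip_frag_next(skb, &state);	/* allocates one fragment */
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		err = output(net, sk, skb2);
		if (err)
			goto fail;
	}
	consume_skb(skb);	/* all data has been copied into fragments */
	return 0;
fail:
	kfree_skb(skb);
	return err;
}
#endif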
752065ff79fSPablo Neira Ayuso 
7531da177e4SLinus Torvalds /*
7541da177e4SLinus Torvalds  *	This IP datagram is too large to be sent in one piece.  Break it up into
7551da177e4SLinus Torvalds  *	smaller pieces (each of a size equal to the IP header plus a block of
7561da177e4SLinus Torvalds  *	the data of the original IP payload) so that each piece still fits in a
7571da177e4SLinus Torvalds  *	single device frame, and queue such frames for sending.
7581da177e4SLinus Torvalds  */
7591da177e4SLinus Torvalds 
760694869b3SEric W. Biederman int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
761694869b3SEric W. Biederman 		   int (*output)(struct net *, struct sock *, struct sk_buff *))
7621da177e4SLinus Torvalds {
7631da177e4SLinus Torvalds 	struct iphdr *iph;
7641da177e4SLinus Torvalds 	struct sk_buff *skb2;
765a1ac9c8aSMartin KaFai Lau 	bool mono_delivery_time = skb->mono_delivery_time;
766511c3f92SEric Dumazet 	struct rtable *rt = skb_rtable(skb);
767065ff79fSPablo Neira Ayuso 	unsigned int mtu, hlen, ll_rs;
768c8b17be0SPablo Neira Ayuso 	struct ip_fraglist_iter iter;
7699669fffcSEric Dumazet 	ktime_t tstamp = skb->tstamp;
770065ff79fSPablo Neira Ayuso 	struct ip_frag_state state;
7711da177e4SLinus Torvalds 	int err = 0;
7721da177e4SLinus Torvalds 
773dbd3393cSHannes Frederic Sowa 	/* for offloaded checksums cleanup checksum before fragmentation */
774dbd3393cSHannes Frederic Sowa 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
775dbd3393cSHannes Frederic Sowa 	    (err = skb_checksum_help(skb)))
776dbd3393cSHannes Frederic Sowa 		goto fail;
777dbd3393cSHannes Frederic Sowa 
7781da177e4SLinus Torvalds 	/*
7791da177e4SLinus Torvalds 	 *	Point into the IP datagram header.
7801da177e4SLinus Torvalds 	 */
7811da177e4SLinus Torvalds 
782eddc9ec5SArnaldo Carvalho de Melo 	iph = ip_hdr(skb);
7831da177e4SLinus Torvalds 
784fedbb6b4SShmulik Ladkani 	mtu = ip_skb_dst_mtu(sk, skb);
785d6b915e2SFlorian Westphal 	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
786d6b915e2SFlorian Westphal 		mtu = IPCB(skb)->frag_max_size;
7871da177e4SLinus Torvalds 
7881da177e4SLinus Torvalds 	/*
7891da177e4SLinus Torvalds 	 *	Setup starting values.
7901da177e4SLinus Torvalds 	 */
7911da177e4SLinus Torvalds 
7921da177e4SLinus Torvalds 	hlen = iph->ihl * 4;
793f87c10a8SHannes Frederic Sowa 	mtu = mtu - hlen;	/* Size of data space */
79489cee8b1SHerbert Xu 	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
795254d900bSVasily Averin 	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
7961da177e4SLinus Torvalds 
7971da177e4SLinus Torvalds 	/* When frag_list is given, use it. First, check its validity:
7981da177e4SLinus Torvalds 	 * some transformers could create a wrong frag_list or break an existing
7991da177e4SLinus Torvalds 	 * one; that is not prohibited. In this case fall back to copying.
8001da177e4SLinus Torvalds 	 *
8011da177e4SLinus Torvalds 	 * LATER: this step can be merged into the real generation of fragments;
8021da177e4SLinus Torvalds 	 * we can switch to copying when we see the first bad fragment.
8031da177e4SLinus Torvalds 	 */
80421dc3301SDavid S. Miller 	if (skb_has_frag_list(skb)) {
8053d13008eSEric Dumazet 		struct sk_buff *frag, *frag2;
806c72d8cdaSAlexey Dobriyan 		unsigned int first_len = skb_pagelen(skb);
8071da177e4SLinus Torvalds 
8081da177e4SLinus Torvalds 		if (first_len - hlen > mtu ||
8091da177e4SLinus Torvalds 		    ((first_len - hlen) & 7) ||
81056f8a75cSPaul Gortmaker 		    ip_is_fragment(iph) ||
811254d900bSVasily Averin 		    skb_cloned(skb) ||
812254d900bSVasily Averin 		    skb_headroom(skb) < ll_rs)
8131da177e4SLinus Torvalds 			goto slow_path;
8141da177e4SLinus Torvalds 
815d7fcf1a5SDavid S. Miller 		skb_walk_frags(skb, frag) {
8161da177e4SLinus Torvalds 			/* Correct geometry. */
8171da177e4SLinus Torvalds 			if (frag->len > mtu ||
8181da177e4SLinus Torvalds 			    ((frag->len & 7) && frag->next) ||
819254d900bSVasily Averin 			    skb_headroom(frag) < hlen + ll_rs)
8203d13008eSEric Dumazet 				goto slow_path_clean;
8211da177e4SLinus Torvalds 
8221da177e4SLinus Torvalds 			/* Partially cloned skb? */
8231da177e4SLinus Torvalds 			if (skb_shared(frag))
8243d13008eSEric Dumazet 				goto slow_path_clean;
8252fdba6b0SHerbert Xu 
8262fdba6b0SHerbert Xu 			BUG_ON(frag->sk);
8272fdba6b0SHerbert Xu 			if (skb->sk) {
8282fdba6b0SHerbert Xu 				frag->sk = skb->sk;
8292fdba6b0SHerbert Xu 				frag->destructor = sock_wfree;
8302fdba6b0SHerbert Xu 			}
8313d13008eSEric Dumazet 			skb->truesize -= frag->truesize;
8321da177e4SLinus Torvalds 		}
8331da177e4SLinus Torvalds 
8341da177e4SLinus Torvalds 		/* Everything is OK. Generate! */
835c8b17be0SPablo Neira Ayuso 		ip_fraglist_init(skb, iph, hlen, &iter);
8361b9fbe81SYajun Deng 
8371da177e4SLinus Torvalds 		for (;;) {
8381da177e4SLinus Torvalds 			/* Prepare the header of the next frame
8391da177e4SLinus Torvalds 			 * before the previous one is sent down. */
84019c3401aSPablo Neira Ayuso 			if (iter.frag) {
84127a8caa5SJakub Kicinski 				bool first_frag = (iter.offset == 0);
84227a8caa5SJakub Kicinski 
843faf482caSYajun Deng 				IPCB(iter.frag)->flags = IPCB(skb)->flags;
844c8b17be0SPablo Neira Ayuso 				ip_fraglist_prepare(skb, &iter);
84527a8caa5SJakub Kicinski 				if (first_frag && IPCB(skb)->opt.optlen) {
84627a8caa5SJakub Kicinski 					/* ipcb->opt is not populated for frags
84727a8caa5SJakub Kicinski 					 * coming from __ip_make_skb(),
84827a8caa5SJakub Kicinski 					 * ip_options_fragment() needs optlen
84927a8caa5SJakub Kicinski 					 */
85027a8caa5SJakub Kicinski 					IPCB(iter.frag)->opt.optlen =
85127a8caa5SJakub Kicinski 						IPCB(skb)->opt.optlen;
85227a8caa5SJakub Kicinski 					ip_options_fragment(iter.frag);
85327a8caa5SJakub Kicinski 					ip_send_check(iter.iph);
85427a8caa5SJakub Kicinski 				}
85519c3401aSPablo Neira Ayuso 			}
8561da177e4SLinus Torvalds 
857a1ac9c8aSMartin KaFai Lau 			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
858694869b3SEric W. Biederman 			err = output(net, sk, skb);
8591da177e4SLinus Torvalds 
860dafee490SWei Dong 			if (!err)
86126a949dbSEric W. Biederman 				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
862c8b17be0SPablo Neira Ayuso 			if (err || !iter.frag)
8631da177e4SLinus Torvalds 				break;
8641da177e4SLinus Torvalds 
865c8b17be0SPablo Neira Ayuso 			skb = ip_fraglist_next(&iter);
8661da177e4SLinus Torvalds 		}
8671da177e4SLinus Torvalds 
8681da177e4SLinus Torvalds 		if (err == 0) {
86926a949dbSEric W. Biederman 			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
8701da177e4SLinus Torvalds 			return 0;
8711da177e4SLinus Torvalds 		}
8721da177e4SLinus Torvalds 
873b7034146SEric Dumazet 		kfree_skb_list(iter.frag);
874942f146aSPablo Neira Ayuso 
87526a949dbSEric W. Biederman 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
8761da177e4SLinus Torvalds 		return err;
8773d13008eSEric Dumazet 
8783d13008eSEric Dumazet slow_path_clean:
8793d13008eSEric Dumazet 		skb_walk_frags(skb, frag2) {
8803d13008eSEric Dumazet 			if (frag2 == frag)
8813d13008eSEric Dumazet 				break;
8823d13008eSEric Dumazet 			frag2->sk = NULL;
8833d13008eSEric Dumazet 			frag2->destructor = NULL;
8843d13008eSEric Dumazet 			skb->truesize += frag2->truesize;
8853d13008eSEric Dumazet 		}
8861da177e4SLinus Torvalds 	}
8871da177e4SLinus Torvalds 
8881da177e4SLinus Torvalds slow_path:
8891da177e4SLinus Torvalds 	/*
8901da177e4SLinus Torvalds 	 *	Fragment the datagram.
8911da177e4SLinus Torvalds 	 */
8921da177e4SLinus Torvalds 
893e7a409c3SEric Dumazet 	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
894e7a409c3SEric Dumazet 		     &state);
8951da177e4SLinus Torvalds 
8961da177e4SLinus Torvalds 	/*
8971da177e4SLinus Torvalds 	 *	Keep copying data until we run out.
8981da177e4SLinus Torvalds 	 */
8991da177e4SLinus Torvalds 
900065ff79fSPablo Neira Ayuso 	while (state.left > 0) {
90119c3401aSPablo Neira Ayuso 		bool first_frag = (state.offset == 0);
90219c3401aSPablo Neira Ayuso 
903065ff79fSPablo Neira Ayuso 		skb2 = ip_frag_next(skb, &state);
904065ff79fSPablo Neira Ayuso 		if (IS_ERR(skb2)) {
905065ff79fSPablo Neira Ayuso 			err = PTR_ERR(skb2);
9061da177e4SLinus Torvalds 			goto fail;
9071da177e4SLinus Torvalds 		}
908faf482caSYajun Deng 		ip_frag_ipcb(skb, skb2, first_frag);
9091da177e4SLinus Torvalds 
9101da177e4SLinus Torvalds 		/*
9111da177e4SLinus Torvalds 		 *	Put this fragment into the sending queue.
9121da177e4SLinus Torvalds 		 */
913a1ac9c8aSMartin KaFai Lau 		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
914694869b3SEric W. Biederman 		err = output(net, sk, skb2);
9151da177e4SLinus Torvalds 		if (err)
9161da177e4SLinus Torvalds 			goto fail;
917dafee490SWei Dong 
91826a949dbSEric W. Biederman 		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
9191da177e4SLinus Torvalds 	}
9205d0ba55bSEric Dumazet 	consume_skb(skb);
92126a949dbSEric W. Biederman 	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
9221da177e4SLinus Torvalds 	return err;
9231da177e4SLinus Torvalds 
9241da177e4SLinus Torvalds fail:
9251da177e4SLinus Torvalds 	kfree_skb(skb);
92626a949dbSEric W. Biederman 	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
9271da177e4SLinus Torvalds 	return err;
9281da177e4SLinus Torvalds }
92949d16b23SAndy Zhou EXPORT_SYMBOL(ip_do_fragment);
9302e2f7aefSPatrick McHardy 
9311da177e4SLinus Torvalds int
9321da177e4SLinus Torvalds ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
9331da177e4SLinus Torvalds {
934f69e6d13SAl Viro 	struct msghdr *msg = from;
9351da177e4SLinus Torvalds 
93684fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
9370b62fca2SAl Viro 		if (!copy_from_iter_full(to, len, &msg->msg_iter))
9381da177e4SLinus Torvalds 			return -EFAULT;
9391da177e4SLinus Torvalds 	} else {
94044bb9363SAl Viro 		__wsum csum = 0;
9410b62fca2SAl Viro 		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
9421da177e4SLinus Torvalds 			return -EFAULT;
9431da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, csum, odd);
9441da177e4SLinus Torvalds 	}
9451da177e4SLinus Torvalds 	return 0;
9461da177e4SLinus Torvalds }
9474bc2f18bSEric Dumazet EXPORT_SYMBOL(ip_generic_getfrag);
9481da177e4SLinus Torvalds 
949f5fca608SDavid S. Miller static int __ip_append_data(struct sock *sk,
950f5fca608SDavid S. Miller 			    struct flowi4 *fl4,
951f5fca608SDavid S. Miller 			    struct sk_buff_head *queue,
9521470ddf7SHerbert Xu 			    struct inet_cork *cork,
9535640f768SEric Dumazet 			    struct page_frag *pfrag,
9541470ddf7SHerbert Xu 			    int getfrag(void *from, char *to, int offset,
9551470ddf7SHerbert Xu 					int len, int odd, struct sk_buff *skb),
9561da177e4SLinus Torvalds 			    void *from, int length, int transhdrlen,
9571da177e4SLinus Torvalds 			    unsigned int flags)
9581da177e4SLinus Torvalds {
9591da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
960b5947e5dSWillem de Bruijn 	struct ubuf_info *uarg = NULL;
9611da177e4SLinus Torvalds 	struct sk_buff *skb;
96207df5294SHerbert Xu 	struct ip_options *opt = cork->opt;
9631da177e4SLinus Torvalds 	int hh_len;
9641da177e4SLinus Torvalds 	int exthdrlen;
9651da177e4SLinus Torvalds 	int mtu;
9661da177e4SLinus Torvalds 	int copy;
9671da177e4SLinus Torvalds 	int err;
9681da177e4SLinus Torvalds 	int offset = 0;
9698eb77cc7SPavel Begunkov 	bool zc = false;
970daba287bSHannes Frederic Sowa 	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
9711da177e4SLinus Torvalds 	int csummode = CHECKSUM_NONE;
9721470ddf7SHerbert Xu 	struct rtable *rt = (struct rtable *)cork->dst;
973694aba69SEric Dumazet 	unsigned int wmem_alloc_delta = 0;
974100f6d8eSWillem de Bruijn 	bool paged, extra_uref = false;
97509c2d251SWillem de Bruijn 	u32 tskey = 0;
9761da177e4SLinus Torvalds 
97796d7303eSSteffen Klassert 	skb = skb_peek_tail(queue);
97896d7303eSSteffen Klassert 
97996d7303eSSteffen Klassert 	exthdrlen = !skb ? rt->dst.header_len : 0;
980bec1f6f6SWillem de Bruijn 	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
98115e36f5bSWillem de Bruijn 	paged = !!cork->gso_size;
982bec1f6f6SWillem de Bruijn 
9838ca5a579SVadim Fedorenko 	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
98409c2d251SWillem de Bruijn 	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
985a1cdec57SEric Dumazet 		tskey = atomic_inc_return(&sk->sk_tskey) - 1;
9861470ddf7SHerbert Xu 
987d8d1f30bSChangli Gao 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
9881da177e4SLinus Torvalds 
9891da177e4SLinus Torvalds 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
9901da177e4SLinus Torvalds 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
991cbc08a33SMiaohe Lin 	maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
9921da177e4SLinus Torvalds 
993daba287bSHannes Frederic Sowa 	if (cork->length + length > maxnonfragsize - fragheaderlen) {
994f5fca608SDavid S. Miller 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
99561e7f09dSHannes Frederic Sowa 			       mtu - (opt ? opt->optlen : 0));
9961da177e4SLinus Torvalds 		return -EMSGSIZE;
9971da177e4SLinus Torvalds 	}
9981da177e4SLinus Torvalds 
9991da177e4SLinus Torvalds 	/*
10001da177e4SLinus Torvalds 	 * transhdrlen > 0 means that this is the first fragment and we wish
10011da177e4SLinus Torvalds 	 * it not to be fragmented later.
10021da177e4SLinus Torvalds 	 */
10031da177e4SLinus Torvalds 	if (transhdrlen &&
10041da177e4SLinus Torvalds 	    length + fragheaderlen <= mtu &&
1005c8cd0989STom Herbert 	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
1006bec1f6f6SWillem de Bruijn 	    (!(flags & MSG_MORE) || cork->gso_size) &&
1007cd027a54SJacek Kalwas 	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
100884fa7933SPatrick McHardy 		csummode = CHECKSUM_PARTIAL;
10091da177e4SLinus Torvalds 
1010c445f31bSPavel Begunkov 	if ((flags & MSG_ZEROCOPY) && length) {
1011c445f31bSPavel Begunkov 		struct msghdr *msg = from;
1012c445f31bSPavel Begunkov 
1013c445f31bSPavel Begunkov 		if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
1014c445f31bSPavel Begunkov 			if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
1015c445f31bSPavel Begunkov 				return -EINVAL;
1016c445f31bSPavel Begunkov 
1017c445f31bSPavel Begunkov 			/* Leave uarg NULL if can't zerocopy, callers should
1018c445f31bSPavel Begunkov 			 * be able to handle it.
1019c445f31bSPavel Begunkov 			 */
1020c445f31bSPavel Begunkov 			if ((rt->dst.dev->features & NETIF_F_SG) &&
1021c445f31bSPavel Begunkov 			    csummode == CHECKSUM_PARTIAL) {
1022c445f31bSPavel Begunkov 				paged = true;
1023c445f31bSPavel Begunkov 				zc = true;
1024c445f31bSPavel Begunkov 				uarg = msg->msg_ubuf;
1025c445f31bSPavel Begunkov 			}
1026c445f31bSPavel Begunkov 		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
10278c793822SJonathan Lemon 			uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
1028b5947e5dSWillem de Bruijn 			if (!uarg)
1029b5947e5dSWillem de Bruijn 				return -ENOBUFS;
1030522924b5SWillem de Bruijn 			extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
1031b5947e5dSWillem de Bruijn 			if (rt->dst.dev->features & NETIF_F_SG &&
1032b5947e5dSWillem de Bruijn 			    csummode == CHECKSUM_PARTIAL) {
1033b5947e5dSWillem de Bruijn 				paged = true;
10348eb77cc7SPavel Begunkov 				zc = true;
1035b5947e5dSWillem de Bruijn 			} else {
1036e7d2b510SPavel Begunkov 				uarg_to_msgzc(uarg)->zerocopy = 0;
103752900d22SWillem de Bruijn 				skb_zcopy_set(skb, uarg, &extra_uref);
1038b5947e5dSWillem de Bruijn 			}
1039b5947e5dSWillem de Bruijn 		}
10407da0dde6SDavid Howells 	} else if ((flags & MSG_SPLICE_PAGES) && length) {
10417da0dde6SDavid Howells 		if (inet->hdrincl)
10427da0dde6SDavid Howells 			return -EPERM;
10437da0dde6SDavid Howells 		if (rt->dst.dev->features & NETIF_F_SG)
10447da0dde6SDavid Howells 			/* We need an empty buffer to attach stuff to */
10457da0dde6SDavid Howells 			paged = true;
10467da0dde6SDavid Howells 		else
10477da0dde6SDavid Howells 			flags &= ~MSG_SPLICE_PAGES;
1048c445f31bSPavel Begunkov 	}
1049b5947e5dSWillem de Bruijn 
10501470ddf7SHerbert Xu 	cork->length += length;
10511da177e4SLinus Torvalds 
10521da177e4SLinus Torvalds 	/* So, what's going on in the loop below?
10531da177e4SLinus Torvalds 	 *
10541da177e4SLinus Torvalds 	 * We use the calculated fragment length to generate a chain of skbs;
10551da177e4SLinus Torvalds 	 * each segment is an IP fragment that is ready to be sent to the
10561da177e4SLinus Torvalds 	 * network once the appropriate IP header has been added.
10571da177e4SLinus Torvalds 	 */
10581da177e4SLinus Torvalds 
105926cde9f7SHerbert Xu 	if (!skb)
10601da177e4SLinus Torvalds 		goto alloc_new_skb;
10611da177e4SLinus Torvalds 
10621da177e4SLinus Torvalds 	while (length > 0) {
10631da177e4SLinus Torvalds 		/* Check if the remaining data fits into the current packet. */
10641da177e4SLinus Torvalds 		copy = mtu - skb->len;
10651da177e4SLinus Torvalds 		if (copy < length)
10661da177e4SLinus Torvalds 			copy = maxfraglen - skb->len;
10671da177e4SLinus Torvalds 		if (copy <= 0) {
10681da177e4SLinus Torvalds 			char *data;
10691da177e4SLinus Torvalds 			unsigned int datalen;
10701da177e4SLinus Torvalds 			unsigned int fraglen;
10711da177e4SLinus Torvalds 			unsigned int fraggap;
10726d123b81SJakub Kicinski 			unsigned int alloclen, alloc_extra;
1073aba36930SWillem de Bruijn 			unsigned int pagedlen;
10741da177e4SLinus Torvalds 			struct sk_buff *skb_prev;
10751da177e4SLinus Torvalds alloc_new_skb:
10761da177e4SLinus Torvalds 			skb_prev = skb;
10771da177e4SLinus Torvalds 			if (skb_prev)
10781da177e4SLinus Torvalds 				fraggap = skb_prev->len - maxfraglen;
10791da177e4SLinus Torvalds 			else
10801da177e4SLinus Torvalds 				fraggap = 0;
10811da177e4SLinus Torvalds 
10821da177e4SLinus Torvalds 			/*
10831da177e4SLinus Torvalds 			 * If remaining data exceeds the mtu,
10841da177e4SLinus Torvalds 			 * we know we need more fragment(s).
10851da177e4SLinus Torvalds 			 */
10861da177e4SLinus Torvalds 			datalen = length + fraggap;
10871da177e4SLinus Torvalds 			if (datalen > mtu - fragheaderlen)
10881da177e4SLinus Torvalds 				datalen = maxfraglen - fragheaderlen;
10891da177e4SLinus Torvalds 			fraglen = datalen + fragheaderlen;
1090aba36930SWillem de Bruijn 			pagedlen = 0;
10911da177e4SLinus Torvalds 
10926d123b81SJakub Kicinski 			alloc_extra = hh_len + 15;
10936d123b81SJakub Kicinski 			alloc_extra += exthdrlen;
1094353e5c9aSSteffen Klassert 
10951da177e4SLinus Torvalds 			/* The last fragment gets additional space at tail.
10961da177e4SLinus Torvalds 			 * Note that with MSG_MORE we overallocate on fragments,
10971da177e4SLinus Torvalds 			 * because we have no idea which fragment will be
10981da177e4SLinus Torvalds 			 * the last.
10991da177e4SLinus Torvalds 			 */
110033f99dc7SSteffen Klassert 			if (datalen == length + fraggap)
11016d123b81SJakub Kicinski 				alloc_extra += rt->dst.trailer_len;
11026d123b81SJakub Kicinski 
11036d123b81SJakub Kicinski 			if ((flags & MSG_MORE) &&
11046d123b81SJakub Kicinski 			    !(rt->dst.dev->features&NETIF_F_SG))
11056d123b81SJakub Kicinski 				alloclen = mtu;
11066d123b81SJakub Kicinski 			else if (!paged &&
11076d123b81SJakub Kicinski 				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
11086d123b81SJakub Kicinski 				  !(rt->dst.dev->features & NETIF_F_SG)))
11096d123b81SJakub Kicinski 				alloclen = fraglen;
111047cf8899SPavel Begunkov 			else {
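				/* Paged path: only the IP header and any
				 * transport header go into the linear area;
				 * the remaining pagedlen bytes of this
				 * fragment are placed in page frags (or the
				 * sender's own pages for zerocopy).
				 */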
11118eb77cc7SPavel Begunkov 				alloclen = fragheaderlen + transhdrlen;
11128eb77cc7SPavel Begunkov 				pagedlen = datalen - transhdrlen;
11136d123b81SJakub Kicinski 			}
11146d123b81SJakub Kicinski 
11156d123b81SJakub Kicinski 			alloclen += alloc_extra;
111633f99dc7SSteffen Klassert 
11171da177e4SLinus Torvalds 			if (transhdrlen) {
11186d123b81SJakub Kicinski 				skb = sock_alloc_send_skb(sk, alloclen,
11191da177e4SLinus Torvalds 						(flags & MSG_DONTWAIT), &err);
11201da177e4SLinus Torvalds 			} else {
11211da177e4SLinus Torvalds 				skb = NULL;
1122694aba69SEric Dumazet 				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
11231da177e4SLinus Torvalds 				    2 * sk->sk_sndbuf)
11246d123b81SJakub Kicinski 					skb = alloc_skb(alloclen,
11251da177e4SLinus Torvalds 							sk->sk_allocation);
112651456b29SIan Morris 				if (unlikely(!skb))
11271da177e4SLinus Torvalds 					err = -ENOBUFS;
11281da177e4SLinus Torvalds 			}
112951456b29SIan Morris 			if (!skb)
11301da177e4SLinus Torvalds 				goto error;
11311da177e4SLinus Torvalds 
11321da177e4SLinus Torvalds 			/*
11331da177e4SLinus Torvalds 			 *	Fill in the control structures
11341da177e4SLinus Torvalds 			 */
11351da177e4SLinus Torvalds 			skb->ip_summed = csummode;
11361da177e4SLinus Torvalds 			skb->csum = 0;
11371da177e4SLinus Torvalds 			skb_reserve(skb, hh_len);
113811878b40SWillem de Bruijn 
11391da177e4SLinus Torvalds 			/*
11401da177e4SLinus Torvalds 			 *	Find where to start putting bytes.
11411da177e4SLinus Torvalds 			 */
114215e36f5bSWillem de Bruijn 			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
1143c14d2450SArnaldo Carvalho de Melo 			skb_set_network_header(skb, exthdrlen);
1144b0e380b1SArnaldo Carvalho de Melo 			skb->transport_header = (skb->network_header +
1145b0e380b1SArnaldo Carvalho de Melo 						 fragheaderlen);
1146353e5c9aSSteffen Klassert 			data += fragheaderlen + exthdrlen;
11471da177e4SLinus Torvalds 
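			/* If the previous skb ran past the 8-byte aligned
			 * fragment boundary, move the overhang (and its
			 * checksum contribution) into this skb and trim the
			 * previous one back to maxfraglen.
			 */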
11481da177e4SLinus Torvalds 			if (fraggap) {
11491da177e4SLinus Torvalds 				skb->csum = skb_copy_and_csum_bits(
11501da177e4SLinus Torvalds 					skb_prev, maxfraglen,
11518d5930dfSAl Viro 					data + transhdrlen, fraggap);
11521da177e4SLinus Torvalds 				skb_prev->csum = csum_sub(skb_prev->csum,
11531da177e4SLinus Torvalds 							  skb->csum);
11541da177e4SLinus Torvalds 				data += fraggap;
1155e9fa4f7bSHerbert Xu 				pskb_trim_unique(skb_prev, maxfraglen);
11561da177e4SLinus Torvalds 			}
11571da177e4SLinus Torvalds 
115815e36f5bSWillem de Bruijn 			copy = datalen - transhdrlen - fraggap - pagedlen;
11591da177e4SLinus Torvalds 			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
11601da177e4SLinus Torvalds 				err = -EFAULT;
11611da177e4SLinus Torvalds 				kfree_skb(skb);
11621da177e4SLinus Torvalds 				goto error;
11631da177e4SLinus Torvalds 			}
11641da177e4SLinus Torvalds 
11651da177e4SLinus Torvalds 			offset += copy;
116615e36f5bSWillem de Bruijn 			length -= copy + transhdrlen;
11671da177e4SLinus Torvalds 			transhdrlen = 0;
11681da177e4SLinus Torvalds 			exthdrlen = 0;
11691da177e4SLinus Torvalds 			csummode = CHECKSUM_NONE;
11701da177e4SLinus Torvalds 
117152900d22SWillem de Bruijn 			/* only the initial fragment is time stamped */
117252900d22SWillem de Bruijn 			skb_shinfo(skb)->tx_flags = cork->tx_flags;
117352900d22SWillem de Bruijn 			cork->tx_flags = 0;
117452900d22SWillem de Bruijn 			skb_shinfo(skb)->tskey = tskey;
117552900d22SWillem de Bruijn 			tskey = 0;
117652900d22SWillem de Bruijn 			skb_zcopy_set(skb, uarg, &extra_uref);
117752900d22SWillem de Bruijn 
11780dec879fSJulian Anastasov 			if ((flags & MSG_CONFIRM) && !skb_prev)
11790dec879fSJulian Anastasov 				skb_set_dst_pending_confirm(skb, 1);
11800dec879fSJulian Anastasov 
11811da177e4SLinus Torvalds 			/*
11821da177e4SLinus Torvalds 			 * Put the packet on the pending queue.
11831da177e4SLinus Torvalds 			 */
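			/* Charge the skb to the socket; its truesize is
			 * batched in wmem_alloc_delta and added to
			 * sk_wmem_alloc in one go at the end.
			 */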
1184694aba69SEric Dumazet 			if (!skb->destructor) {
1185694aba69SEric Dumazet 				skb->destructor = sock_wfree;
1186694aba69SEric Dumazet 				skb->sk = sk;
1187694aba69SEric Dumazet 				wmem_alloc_delta += skb->truesize;
1188694aba69SEric Dumazet 			}
11891470ddf7SHerbert Xu 			__skb_queue_tail(queue, skb);
11901da177e4SLinus Torvalds 			continue;
11911da177e4SLinus Torvalds 		}
11921da177e4SLinus Torvalds 
11931da177e4SLinus Torvalds 		if (copy > length)
11941da177e4SLinus Torvalds 			copy = length;
11951da177e4SLinus Torvalds 
1196113f99c3SWillem de Bruijn 		if (!(rt->dst.dev->features&NETIF_F_SG) &&
1197113f99c3SWillem de Bruijn 		    skb_tailroom(skb) >= copy) {
11981da177e4SLinus Torvalds 			unsigned int off;
11991da177e4SLinus Torvalds 
12001da177e4SLinus Torvalds 			off = skb->len;
12011da177e4SLinus Torvalds 			if (getfrag(from, skb_put(skb, copy),
12021da177e4SLinus Torvalds 					offset, copy, off, skb) < 0) {
12031da177e4SLinus Torvalds 				__skb_trim(skb, off);
12041da177e4SLinus Torvalds 				err = -EFAULT;
12051da177e4SLinus Torvalds 				goto error;
12061da177e4SLinus Torvalds 			}
12077da0dde6SDavid Howells 		} else if (flags & MSG_SPLICE_PAGES) {
12087da0dde6SDavid Howells 			struct msghdr *msg = from;
12097da0dde6SDavid Howells 
12107da0dde6SDavid Howells 			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
12117da0dde6SDavid Howells 						   sk->sk_allocation);
12127da0dde6SDavid Howells 			if (err < 0)
12137da0dde6SDavid Howells 				goto error;
12147da0dde6SDavid Howells 			copy = err;
12157da0dde6SDavid Howells 			wmem_alloc_delta += copy;
1216c445f31bSPavel Begunkov 		} else if (!zc) {
12171da177e4SLinus Torvalds 			int i = skb_shinfo(skb)->nr_frags;
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds 			err = -ENOMEM;
12205640f768SEric Dumazet 			if (!sk_page_frag_refill(sk, pfrag))
12211da177e4SLinus Torvalds 				goto error;
12221da177e4SLinus Torvalds 
1223c445f31bSPavel Begunkov 			skb_zcopy_downgrade_managed(skb);
12245640f768SEric Dumazet 			if (!skb_can_coalesce(skb, i, pfrag->page,
12255640f768SEric Dumazet 					      pfrag->offset)) {
12261da177e4SLinus Torvalds 				err = -EMSGSIZE;
12275640f768SEric Dumazet 				if (i == MAX_SKB_FRAGS)
12281da177e4SLinus Torvalds 					goto error;
12295640f768SEric Dumazet 
12305640f768SEric Dumazet 				__skb_fill_page_desc(skb, i, pfrag->page,
12315640f768SEric Dumazet 						     pfrag->offset, 0);
12325640f768SEric Dumazet 				skb_shinfo(skb)->nr_frags = ++i;
12335640f768SEric Dumazet 				get_page(pfrag->page);
12341da177e4SLinus Torvalds 			}
12355640f768SEric Dumazet 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
12365640f768SEric Dumazet 			if (getfrag(from,
12375640f768SEric Dumazet 				    page_address(pfrag->page) + pfrag->offset,
12385640f768SEric Dumazet 				    offset, copy, skb->len, skb) < 0)
12395640f768SEric Dumazet 				goto error_efault;
12405640f768SEric Dumazet 
12415640f768SEric Dumazet 			pfrag->offset += copy;
12425640f768SEric Dumazet 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1243ede57d58SRichard Gobert 			skb_len_add(skb, copy);
1244694aba69SEric Dumazet 			wmem_alloc_delta += copy;
1245b5947e5dSWillem de Bruijn 		} else {
1246b5947e5dSWillem de Bruijn 			err = skb_zerocopy_iter_dgram(skb, from, copy);
1247b5947e5dSWillem de Bruijn 			if (err < 0)
1248b5947e5dSWillem de Bruijn 				goto error;
12491da177e4SLinus Torvalds 		}
12501da177e4SLinus Torvalds 		offset += copy;
12511da177e4SLinus Torvalds 		length -= copy;
12521da177e4SLinus Torvalds 	}
12531da177e4SLinus Torvalds 
12549e8445a5SPaolo Abeni 	if (wmem_alloc_delta)
1255694aba69SEric Dumazet 		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
12561da177e4SLinus Torvalds 	return 0;
12571da177e4SLinus Torvalds 
12585640f768SEric Dumazet error_efault:
12595640f768SEric Dumazet 	err = -EFAULT;
12601da177e4SLinus Torvalds error:
12618e044917SJonathan Lemon 	net_zcopy_put_abort(uarg, extra_uref);
12621470ddf7SHerbert Xu 	cork->length -= length;
12635e38e270SPavel Emelyanov 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1264694aba69SEric Dumazet 	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
12651da177e4SLinus Torvalds 	return err;
12661da177e4SLinus Torvalds }
12671da177e4SLinus Torvalds 
12681470ddf7SHerbert Xu static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
12691470ddf7SHerbert Xu 			 struct ipcm_cookie *ipc, struct rtable **rtp)
12701470ddf7SHerbert Xu {
1271f6d8bd05SEric Dumazet 	struct ip_options_rcu *opt;
12721470ddf7SHerbert Xu 	struct rtable *rt;
12731470ddf7SHerbert Xu 
12749783ccd0SGao Feng 	rt = *rtp;
12759783ccd0SGao Feng 	if (unlikely(!rt))
12769783ccd0SGao Feng 		return -EFAULT;
12779783ccd0SGao Feng 
12781470ddf7SHerbert Xu 	/*
12791470ddf7SHerbert Xu 	 * Set up for corking.
12801470ddf7SHerbert Xu 	 */
12811470ddf7SHerbert Xu 	opt = ipc->opt;
12821470ddf7SHerbert Xu 	if (opt) {
128351456b29SIan Morris 		if (!cork->opt) {
12841470ddf7SHerbert Xu 			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
12851470ddf7SHerbert Xu 					    sk->sk_allocation);
128651456b29SIan Morris 			if (unlikely(!cork->opt))
12871470ddf7SHerbert Xu 				return -ENOBUFS;
12881470ddf7SHerbert Xu 		}
1289f6d8bd05SEric Dumazet 		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
12901470ddf7SHerbert Xu 		cork->flags |= IPCORK_OPT;
12911470ddf7SHerbert Xu 		cork->addr = ipc->addr;
12921470ddf7SHerbert Xu 	}
12939783ccd0SGao Feng 
1294482fc609SHannes Frederic Sowa 	cork->fragsize = ip_sk_use_pmtu(sk) ?
1295501a90c9SEric Dumazet 			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
1296501a90c9SEric Dumazet 
1297501a90c9SEric Dumazet 	if (!inetdev_valid_mtu(cork->fragsize))
1298501a90c9SEric Dumazet 		return -ENETUNREACH;
1299bec1f6f6SWillem de Bruijn 
1300fbf47813SWillem de Bruijn 	cork->gso_size = ipc->gso_size;
1301501a90c9SEric Dumazet 
13021470ddf7SHerbert Xu 	cork->dst = &rt->dst;
1303501a90c9SEric Dumazet 	/* We stole this route; the caller should not release it. */
1304501a90c9SEric Dumazet 	*rtp = NULL;
1305501a90c9SEric Dumazet 
13061470ddf7SHerbert Xu 	cork->length = 0;
1307aa661581SFrancesco Fusco 	cork->ttl = ipc->ttl;
1308aa661581SFrancesco Fusco 	cork->tos = ipc->tos;
1309c6af0c22SWillem de Bruijn 	cork->mark = ipc->sockc.mark;
1310aa661581SFrancesco Fusco 	cork->priority = ipc->priority;
1311bc969a97SJesus Sanchez-Palencia 	cork->transmit_time = ipc->sockc.transmit_time;
1312678ca42dSWillem de Bruijn 	cork->tx_flags = 0;
1313678ca42dSWillem de Bruijn 	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
13141470ddf7SHerbert Xu 
13151470ddf7SHerbert Xu 	return 0;
13161470ddf7SHerbert Xu }
13171470ddf7SHerbert Xu 
13181470ddf7SHerbert Xu /*
1319c49cf266SDavid Howells  *	ip_append_data() can make one large IP datagram from many pieces of
1320c49cf266SDavid Howells  *	data.  Each piece will be held on the socket until
1321c49cf266SDavid Howells  *	ip_push_pending_frames() is called. Each piece can be a page or
1322c49cf266SDavid Howells  *	non-page data.
13231470ddf7SHerbert Xu  *
13241470ddf7SHerbert Xu  *	Transport protocols other than UDP - e.g. raw sockets - can
13251470ddf7SHerbert Xu  *	potentially use this interface as well.
13261470ddf7SHerbert Xu  *
13271470ddf7SHerbert Xu  *	LATER: the length must be adjusted for tail padding, when required.
13281470ddf7SHerbert Xu  */
1329f5fca608SDavid S. Miller int ip_append_data(struct sock *sk, struct flowi4 *fl4,
13301470ddf7SHerbert Xu 		   int getfrag(void *from, char *to, int offset, int len,
13311470ddf7SHerbert Xu 			       int odd, struct sk_buff *skb),
13321470ddf7SHerbert Xu 		   void *from, int length, int transhdrlen,
13331470ddf7SHerbert Xu 		   struct ipcm_cookie *ipc, struct rtable **rtp,
13341470ddf7SHerbert Xu 		   unsigned int flags)
13351470ddf7SHerbert Xu {
13361470ddf7SHerbert Xu 	struct inet_sock *inet = inet_sk(sk);
13371470ddf7SHerbert Xu 	int err;
13381470ddf7SHerbert Xu 
13391470ddf7SHerbert Xu 	if (flags&MSG_PROBE)
13401470ddf7SHerbert Xu 		return 0;
13411470ddf7SHerbert Xu 
13421470ddf7SHerbert Xu 	if (skb_queue_empty(&sk->sk_write_queue)) {
1343bdc712b4SDavid S. Miller 		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
13441470ddf7SHerbert Xu 		if (err)
13451470ddf7SHerbert Xu 			return err;
13461470ddf7SHerbert Xu 	} else {
13471470ddf7SHerbert Xu 		transhdrlen = 0;
13481470ddf7SHerbert Xu 	}
13491470ddf7SHerbert Xu 
13505640f768SEric Dumazet 	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
13515640f768SEric Dumazet 				sk_page_frag(sk), getfrag,
13521470ddf7SHerbert Xu 				from, length, transhdrlen, flags);
13531470ddf7SHerbert Xu }
13541470ddf7SHerbert Xu 
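/*
 * Illustrative sketch only (not part of this file): the typical corked-send
 * pattern a datagram protocol might build on top of ip_append_data().  The
 * helper name and its parameters are hypothetical, and the caller is assumed
 * to hold the socket lock and to have resolved the route and filled in @ipc.
 */
static int example_corked_send(struct sock *sk, struct flowi4 *fl4,
			       struct msghdr *msg, int len, int transhdrlen,
			       struct ipcm_cookie *ipc, struct rtable **rtp)
{
	int err;

	/* Queue the payload; more data may follow while MSG_MORE is set. */
	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len,
			     transhdrlen, ipc, rtp, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);	/* drop everything queued */
	else if (!(msg->msg_flags & MSG_MORE))
		err = ip_push_pending_frames(sk, fl4);	/* build and send */

	return err;
}
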
13551470ddf7SHerbert Xu static void ip_cork_release(struct inet_cork *cork)
1356429f08e9SPavel Emelyanov {
13571470ddf7SHerbert Xu 	cork->flags &= ~IPCORK_OPT;
13581470ddf7SHerbert Xu 	kfree(cork->opt);
13591470ddf7SHerbert Xu 	cork->opt = NULL;
13601470ddf7SHerbert Xu 	dst_release(cork->dst);
13611470ddf7SHerbert Xu 	cork->dst = NULL;
1362429f08e9SPavel Emelyanov }
1363429f08e9SPavel Emelyanov 
13641da177e4SLinus Torvalds /*
13651da177e4SLinus Torvalds  *	Combine all pending IP fragments on the socket into one IP datagram
13661da177e4SLinus Torvalds  *	and push it out.
13671da177e4SLinus Torvalds  */
13681c32c5adSHerbert Xu struct sk_buff *__ip_make_skb(struct sock *sk,
136977968b78SDavid S. Miller 			      struct flowi4 *fl4,
13701470ddf7SHerbert Xu 			      struct sk_buff_head *queue,
13711470ddf7SHerbert Xu 			      struct inet_cork *cork)
13721da177e4SLinus Torvalds {
13731da177e4SLinus Torvalds 	struct sk_buff *skb, *tmp_skb;
13741da177e4SLinus Torvalds 	struct sk_buff **tail_skb;
13751da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
13760388b004SPavel Emelyanov 	struct net *net = sock_net(sk);
13771da177e4SLinus Torvalds 	struct ip_options *opt = NULL;
13781470ddf7SHerbert Xu 	struct rtable *rt = (struct rtable *)cork->dst;
13791da177e4SLinus Torvalds 	struct iphdr *iph;
138076ab608dSAlexey Dobriyan 	__be16 df = 0;
13811da177e4SLinus Torvalds 	__u8 ttl;
13821da177e4SLinus Torvalds 
138351456b29SIan Morris 	skb = __skb_dequeue(queue);
138451456b29SIan Morris 	if (!skb)
13851da177e4SLinus Torvalds 		goto out;
13861da177e4SLinus Torvalds 	tail_skb = &(skb_shinfo(skb)->frag_list);
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds 	/* move skb->data from the ext header to the IP header */
1389d56f90a7SArnaldo Carvalho de Melo 	if (skb->data < skb_network_header(skb))
1390bbe735e4SArnaldo Carvalho de Melo 		__skb_pull(skb, skb_network_offset(skb));
13911470ddf7SHerbert Xu 	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1392cfe1fc77SArnaldo Carvalho de Melo 		__skb_pull(tmp_skb, skb_network_header_len(skb));
13931da177e4SLinus Torvalds 		*tail_skb = tmp_skb;
13941da177e4SLinus Torvalds 		tail_skb = &(tmp_skb->next);
13951da177e4SLinus Torvalds 		skb->len += tmp_skb->len;
13961da177e4SLinus Torvalds 		skb->data_len += tmp_skb->len;
13971da177e4SLinus Torvalds 		skb->truesize += tmp_skb->truesize;
13981da177e4SLinus Torvalds 		tmp_skb->destructor = NULL;
13991da177e4SLinus Torvalds 		tmp_skb->sk = NULL;
14001da177e4SLinus Torvalds 	}
14011da177e4SLinus Torvalds 
14021da177e4SLinus Torvalds 	/* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO), we
14031da177e4SLinus Torvalds 	 * allow the frame generated here to be fragmented. No matter how
14041da177e4SLinus Torvalds 	 * transforms change the size of the packet, it will still go out.
14051da177e4SLinus Torvalds 	 */
140660ff7467SWANG Cong 	skb->ignore_df = ip_sk_ignore_df(sk);
14071da177e4SLinus Torvalds 
14081da177e4SLinus Torvalds 	/* DF bit is set when we want to see DF on outgoing frames.
140960ff7467SWANG Cong 	 * If ignore_df is set too, we still allow this frame to be fragmented
14101da177e4SLinus Torvalds 	 * locally. */
1411482fc609SHannes Frederic Sowa 	if (inet->pmtudisc == IP_PMTUDISC_DO ||
1412482fc609SHannes Frederic Sowa 	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
1413d8d1f30bSChangli Gao 	    (skb->len <= dst_mtu(&rt->dst) &&
1414d8d1f30bSChangli Gao 	     ip_dont_fragment(sk, &rt->dst)))
14151da177e4SLinus Torvalds 		df = htons(IP_DF);
14161da177e4SLinus Torvalds 
14171470ddf7SHerbert Xu 	if (cork->flags & IPCORK_OPT)
14181470ddf7SHerbert Xu 		opt = cork->opt;
14191da177e4SLinus Torvalds 
1420aa661581SFrancesco Fusco 	if (cork->ttl != 0)
1421aa661581SFrancesco Fusco 		ttl = cork->ttl;
1422aa661581SFrancesco Fusco 	else if (rt->rt_type == RTN_MULTICAST)
14231da177e4SLinus Torvalds 		ttl = inet->mc_ttl;
14241da177e4SLinus Torvalds 	else
1425d8d1f30bSChangli Gao 		ttl = ip_select_ttl(inet, &rt->dst);
14261da177e4SLinus Torvalds 
1427749154aaSAnsis Atteka 	iph = ip_hdr(skb);
14281da177e4SLinus Torvalds 	iph->version = 4;
14291da177e4SLinus Torvalds 	iph->ihl = 5;
1430aa661581SFrancesco Fusco 	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
14311da177e4SLinus Torvalds 	iph->frag_off = df;
14321da177e4SLinus Torvalds 	iph->ttl = ttl;
14331da177e4SLinus Torvalds 	iph->protocol = sk->sk_protocol;
143484f9307cSEric Dumazet 	ip_copy_addrs(iph, fl4);
1435b6a7719aSHannes Frederic Sowa 	ip_select_ident(net, skb, sk);
14361da177e4SLinus Torvalds 
143722f728f8SDavid S. Miller 	if (opt) {
143822f728f8SDavid S. Miller 		iph->ihl += opt->optlen >> 2;
14394f0e3040SJakub Kicinski 		ip_options_build(skb, opt, cork->addr, rt);
144022f728f8SDavid S. Miller 	}
144122f728f8SDavid S. Miller 
1442aa661581SFrancesco Fusco 	skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
1443c6af0c22SWillem de Bruijn 	skb->mark = cork->mark;
1444bc969a97SJesus Sanchez-Palencia 	skb->tstamp = cork->transmit_time;
1445a21bba94SEric Dumazet 	/*
1446a21bba94SEric Dumazet 	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1447a21bba94SEric Dumazet 	 * on dst refcount
1448a21bba94SEric Dumazet 	 */
14491470ddf7SHerbert Xu 	cork->dst = NULL;
1450d8d1f30bSChangli Gao 	skb_dst_set(skb, &rt->dst);
14511da177e4SLinus Torvalds 
145299e5acaeSZiyang Xuan 	if (iph->protocol == IPPROTO_ICMP) {
145399e5acaeSZiyang Xuan 		u8 icmp_type;
145499e5acaeSZiyang Xuan 
145599e5acaeSZiyang Xuan 		/* For such sockets, transhdrlen is zero when ip_append_data() is
145699e5acaeSZiyang Xuan 		 * called, so the icmphdr is not in the skb linear region and the
145799e5acaeSZiyang Xuan 		 * ICMP type cannot be read via icmp_hdr(skb)->type.
145899e5acaeSZiyang Xuan 		 */
145999e5acaeSZiyang Xuan 		if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
146099e5acaeSZiyang Xuan 			icmp_type = fl4->fl4_icmp_type;
146199e5acaeSZiyang Xuan 		else
146299e5acaeSZiyang Xuan 			icmp_type = icmp_hdr(skb)->type;
146399e5acaeSZiyang Xuan 		icmp_out_count(net, icmp_type);
146499e5acaeSZiyang Xuan 	}
146596793b48SDavid L Stevens 
14661c32c5adSHerbert Xu 	ip_cork_release(cork);
14671c32c5adSHerbert Xu out:
14681c32c5adSHerbert Xu 	return skb;
14691c32c5adSHerbert Xu }
14701c32c5adSHerbert Xu 
1471b5ec8eeaSEric Dumazet int ip_send_skb(struct net *net, struct sk_buff *skb)
14721c32c5adSHerbert Xu {
14731c32c5adSHerbert Xu 	int err;
14741c32c5adSHerbert Xu 
147533224b16SEric W. Biederman 	err = ip_local_out(net, skb->sk, skb);
14761da177e4SLinus Torvalds 	if (err) {
14771da177e4SLinus Torvalds 		if (err > 0)
14786ce9e7b5SEric Dumazet 			err = net_xmit_errno(err);
14791da177e4SLinus Torvalds 		if (err)
14801c32c5adSHerbert Xu 			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
14811da177e4SLinus Torvalds 	}
14821da177e4SLinus Torvalds 
14831da177e4SLinus Torvalds 	return err;
14841da177e4SLinus Torvalds }
14851da177e4SLinus Torvalds 
148677968b78SDavid S. Miller int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
14871470ddf7SHerbert Xu {
14881c32c5adSHerbert Xu 	struct sk_buff *skb;
14891c32c5adSHerbert Xu 
149077968b78SDavid S. Miller 	skb = ip_finish_skb(sk, fl4);
14911c32c5adSHerbert Xu 	if (!skb)
14921c32c5adSHerbert Xu 		return 0;
14931c32c5adSHerbert Xu 
14941c32c5adSHerbert Xu 	/* Netfilter gets the whole, not yet fragmented, skb. */
1495b5ec8eeaSEric Dumazet 	return ip_send_skb(sock_net(sk), skb);
14961470ddf7SHerbert Xu }
14971470ddf7SHerbert Xu 
14981da177e4SLinus Torvalds /*
14991da177e4SLinus Torvalds  *	Throw away all pending data on the socket.
15001da177e4SLinus Torvalds  */
15011470ddf7SHerbert Xu static void __ip_flush_pending_frames(struct sock *sk,
15021470ddf7SHerbert Xu 				      struct sk_buff_head *queue,
15031470ddf7SHerbert Xu 				      struct inet_cork *cork)
15041da177e4SLinus Torvalds {
15051da177e4SLinus Torvalds 	struct sk_buff *skb;
15061da177e4SLinus Torvalds 
15071470ddf7SHerbert Xu 	while ((skb = __skb_dequeue_tail(queue)) != NULL)
15081da177e4SLinus Torvalds 		kfree_skb(skb);
15091da177e4SLinus Torvalds 
15101470ddf7SHerbert Xu 	ip_cork_release(cork);
15111470ddf7SHerbert Xu }
15121470ddf7SHerbert Xu 
15131470ddf7SHerbert Xu void ip_flush_pending_frames(struct sock *sk)
15141470ddf7SHerbert Xu {
1515bdc712b4SDavid S. Miller 	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
15161da177e4SLinus Torvalds }
15171da177e4SLinus Torvalds 
15181c32c5adSHerbert Xu struct sk_buff *ip_make_skb(struct sock *sk,
151977968b78SDavid S. Miller 			    struct flowi4 *fl4,
15201c32c5adSHerbert Xu 			    int getfrag(void *from, char *to, int offset,
15211c32c5adSHerbert Xu 					int len, int odd, struct sk_buff *skb),
15221c32c5adSHerbert Xu 			    void *from, int length, int transhdrlen,
15231c32c5adSHerbert Xu 			    struct ipcm_cookie *ipc, struct rtable **rtp,
15241cd7884dSWillem de Bruijn 			    struct inet_cork *cork, unsigned int flags)
15251c32c5adSHerbert Xu {
15261c32c5adSHerbert Xu 	struct sk_buff_head queue;
15271c32c5adSHerbert Xu 	int err;
15281c32c5adSHerbert Xu 
15291c32c5adSHerbert Xu 	if (flags & MSG_PROBE)
15301c32c5adSHerbert Xu 		return NULL;
15311c32c5adSHerbert Xu 
15321c32c5adSHerbert Xu 	__skb_queue_head_init(&queue);
15331c32c5adSHerbert Xu 
15341cd7884dSWillem de Bruijn 	cork->flags = 0;
15351cd7884dSWillem de Bruijn 	cork->addr = 0;
15361cd7884dSWillem de Bruijn 	cork->opt = NULL;
15371cd7884dSWillem de Bruijn 	err = ip_setup_cork(sk, cork, ipc, rtp);
15381c32c5adSHerbert Xu 	if (err)
15391c32c5adSHerbert Xu 		return ERR_PTR(err);
15401c32c5adSHerbert Xu 
15411cd7884dSWillem de Bruijn 	err = __ip_append_data(sk, fl4, &queue, cork,
15425640f768SEric Dumazet 			       &current->task_frag, getfrag,
15431c32c5adSHerbert Xu 			       from, length, transhdrlen, flags);
15441c32c5adSHerbert Xu 	if (err) {
15451cd7884dSWillem de Bruijn 		__ip_flush_pending_frames(sk, &queue, cork);
15461c32c5adSHerbert Xu 		return ERR_PTR(err);
15471c32c5adSHerbert Xu 	}
15481c32c5adSHerbert Xu 
15491cd7884dSWillem de Bruijn 	return __ip_make_skb(sk, fl4, &queue, cork);
15501c32c5adSHerbert Xu }
15511da177e4SLinus Torvalds 
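/*
 * Illustrative sketch only (not part of this file): the uncorked fast path
 * built on ip_make_skb()/ip_send_skb(), roughly what a single-datagram
 * sender could do.  The helper name and the surrounding setup (route, flow
 * key, ipc) are hypothetical.
 */
static int example_single_send(struct sock *sk, struct flowi4 *fl4,
			       struct msghdr *msg, int len,
			       struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct inet_cork cork;
	struct sk_buff *skb;

	/* Build one complete datagram without queueing it on the socket. */
	skb = ip_make_skb(sk, fl4, ip_generic_getfrag, msg, len, 0,
			  ipc, rtp, &cork, msg->msg_flags);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	if (!skb)
		return 0;	/* MSG_PROBE: nothing to send */

	return ip_send_skb(sock_net(sk), skb);
}
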
15521da177e4SLinus Torvalds /*
15531da177e4SLinus Torvalds  *	Fetch data from kernel space and fill in checksum if needed.
15541da177e4SLinus Torvalds  */
15551da177e4SLinus Torvalds static int ip_reply_glue_bits(void *dptr, char *to, int offset,
15561da177e4SLinus Torvalds 			      int len, int odd, struct sk_buff *skb)
15571da177e4SLinus Torvalds {
15585084205fSAl Viro 	__wsum csum;
15591da177e4SLinus Torvalds 
1560cc44c17bSAl Viro 	csum = csum_partial_copy_nocheck(dptr+offset, to, len);
15611da177e4SLinus Torvalds 	skb->csum = csum_block_add(skb->csum, csum, odd);
15621da177e4SLinus Torvalds 	return 0;
15631da177e4SLinus Torvalds }
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds /*
15661da177e4SLinus Torvalds  *	Generic function to send a packet as a reply to another packet.
1567be9f4a44SEric Dumazet  *	So far it is used to send TCP resets and ACKs.
15681da177e4SLinus Torvalds  */
1569bdbbb852SEric Dumazet void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
157024a2d43dSEric Dumazet 			   const struct ip_options *sopt,
157124a2d43dSEric Dumazet 			   __be32 daddr, __be32 saddr,
157224a2d43dSEric Dumazet 			   const struct ip_reply_arg *arg,
1573*c0a8966eSAntoine Tenart 			   unsigned int len, u64 transmit_time, u32 txhash)
15741da177e4SLinus Torvalds {
1575f6d8bd05SEric Dumazet 	struct ip_options_data replyopts;
15761da177e4SLinus Torvalds 	struct ipcm_cookie ipc;
157777968b78SDavid S. Miller 	struct flowi4 fl4;
1578511c3f92SEric Dumazet 	struct rtable *rt = skb_rtable(skb);
1579bdbbb852SEric Dumazet 	struct net *net = sock_net(sk);
1580be9f4a44SEric Dumazet 	struct sk_buff *nskb;
15814062090eSVasily Averin 	int err;
1582f7ba868bSDavid Ahern 	int oif;
15831da177e4SLinus Torvalds 
158491ed1e66SPaolo Abeni 	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
15851da177e4SLinus Torvalds 		return;
15861da177e4SLinus Torvalds 
158735178206SWillem de Bruijn 	ipcm_init(&ipc);
15880a5ebb80SDavid S. Miller 	ipc.addr = daddr;
1589d6fb396cSEric Dumazet 	ipc.sockc.transmit_time = transmit_time;
15901da177e4SLinus Torvalds 
1591f6d8bd05SEric Dumazet 	if (replyopts.opt.opt.optlen) {
15921da177e4SLinus Torvalds 		ipc.opt = &replyopts.opt;
15931da177e4SLinus Torvalds 
1594f6d8bd05SEric Dumazet 		if (replyopts.opt.opt.srr)
1595f6d8bd05SEric Dumazet 			daddr = replyopts.opt.opt.faddr;
15961da177e4SLinus Torvalds 	}
15971da177e4SLinus Torvalds 
1598f7ba868bSDavid Ahern 	oif = arg->bound_dev_if;
15999b6c14d5SDavid Ahern 	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
16009b6c14d5SDavid Ahern 		oif = skb->skb_iif;
1601f7ba868bSDavid Ahern 
1602f7ba868bSDavid Ahern 	flowi4_init_output(&fl4, oif,
160300483690SJon Maxwell 			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
160466b13d99SEric Dumazet 			   RT_TOS(arg->tos),
1605be9f4a44SEric Dumazet 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1606538de0e0SDavid S. Miller 			   ip_reply_arg_flowi_flags(arg),
160770e73416SDavid S. Miller 			   daddr, saddr,
1608e2d118a1SLorenzo Colitti 			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1609e2d118a1SLorenzo Colitti 			   arg->uid);
16103df98d79SPaul Moore 	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
1611e22aa148Ssewookseo 	rt = ip_route_output_flow(net, &fl4, sk);
1612b23dd4feSDavid S. Miller 	if (IS_ERR(rt))
16131da177e4SLinus Torvalds 		return;
16141da177e4SLinus Torvalds 
1615ba9e04a7SWei Wang 	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
16161da177e4SLinus Torvalds 
1617eddc9ec5SArnaldo Carvalho de Melo 	sk->sk_protocol = ip_hdr(skb)->protocol;
1618f0e48dbfSPatrick McHardy 	sk->sk_bound_dev_if = arg->bound_dev_if;
16191227c177SKuniyuki Iwashima 	sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
16200da7536fSWillem de Bruijn 	ipc.sockc.mark = fl4.flowi4_mark;
16214062090eSVasily Averin 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
16224062090eSVasily Averin 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
16234062090eSVasily Averin 	if (unlikely(err)) {
16244062090eSVasily Averin 		ip_flush_pending_frames(sk);
16254062090eSVasily Averin 		goto out;
16264062090eSVasily Averin 	}
16274062090eSVasily Averin 
1628be9f4a44SEric Dumazet 	nskb = skb_peek(&sk->sk_write_queue);
1629be9f4a44SEric Dumazet 	if (nskb) {
16301da177e4SLinus Torvalds 		if (arg->csumoffset >= 0)
1631be9f4a44SEric Dumazet 			*((__sum16 *)skb_transport_header(nskb) +
1632be9f4a44SEric Dumazet 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
16339c70220bSArnaldo Carvalho de Melo 								arg->csum));
1634be9f4a44SEric Dumazet 		nskb->ip_summed = CHECKSUM_NONE;
1635d98d58a0SMartin KaFai Lau 		nskb->mono_delivery_time = !!transmit_time;
1636*c0a8966eSAntoine Tenart 		if (txhash)
1637*c0a8966eSAntoine Tenart 			skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
163877968b78SDavid S. Miller 		ip_push_pending_frames(sk, &fl4);
16391da177e4SLinus Torvalds 	}
16404062090eSVasily Averin out:
16411da177e4SLinus Torvalds 	ip_rt_put(rt);
16421da177e4SLinus Torvalds }
16431da177e4SLinus Torvalds 
16441da177e4SLinus Torvalds void __init ip_init(void)
16451da177e4SLinus Torvalds {
16461da177e4SLinus Torvalds 	ip_rt_init();
16471da177e4SLinus Torvalds 	inet_initpeers();
16481da177e4SLinus Torvalds 
164972c1d3bdSWANG Cong #if defined(CONFIG_IP_MULTICAST)
165072c1d3bdSWANG Cong 	igmp_mc_init();
16511da177e4SLinus Torvalds #endif
16521da177e4SLinus Torvalds }
1653