xref: /openbmc/linux/net/core/skbuff.c (revision 97550f6f)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *	Routines having to do with the 'struct sk_buff' memory handlers.
41da177e4SLinus Torvalds  *
5113aa838SAlan Cox  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
61da177e4SLinus Torvalds  *			Florian La Roche <rzsfl@rz.uni-sb.de>
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  *	Fixes:
91da177e4SLinus Torvalds  *		Alan Cox	:	Fixed the worst of the load
101da177e4SLinus Torvalds  *					balancer bugs.
111da177e4SLinus Torvalds  *		Dave Platt	:	Interrupt stacking fix.
121da177e4SLinus Torvalds  *	Richard Kooijman	:	Timestamp fixes.
131da177e4SLinus Torvalds  *		Alan Cox	:	Changed buffer format.
141da177e4SLinus Torvalds  *		Alan Cox	:	destructor hook for AF_UNIX etc.
151da177e4SLinus Torvalds  *		Linus Torvalds	:	Better skb_clone.
161da177e4SLinus Torvalds  *		Alan Cox	:	Added skb_copy.
171da177e4SLinus Torvalds  *		Alan Cox	:	Added all the changed routines Linus
181da177e4SLinus Torvalds  *					only put in the headers
191da177e4SLinus Torvalds  *		Ray VanTassle	:	Fixed --skb->lock in free
201da177e4SLinus Torvalds  *		Alan Cox	:	skb_copy copy arp field
211da177e4SLinus Torvalds  *		Andi Kleen	:	slabified it.
221da177e4SLinus Torvalds  *		Robert Olsson	:	Removed skb_head_pool
231da177e4SLinus Torvalds  *
241da177e4SLinus Torvalds  *	NOTE:
251da177e4SLinus Torvalds  *		The __skb_ routines should be called with interrupts
261da177e4SLinus Torvalds  *	disabled, or you better be *real* sure that the operation is atomic
271da177e4SLinus Torvalds  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
281da177e4SLinus Torvalds  *	or via disabling bottom half handlers, etc).
291da177e4SLinus Torvalds  */
301da177e4SLinus Torvalds 
311da177e4SLinus Torvalds /*
321da177e4SLinus Torvalds  *	The functions in this file will not compile correctly with gcc 2.4.x
331da177e4SLinus Torvalds  */
341da177e4SLinus Torvalds 
35e005d193SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36e005d193SJoe Perches 
371da177e4SLinus Torvalds #include <linux/module.h>
381da177e4SLinus Torvalds #include <linux/types.h>
391da177e4SLinus Torvalds #include <linux/kernel.h>
401da177e4SLinus Torvalds #include <linux/mm.h>
411da177e4SLinus Torvalds #include <linux/interrupt.h>
421da177e4SLinus Torvalds #include <linux/in.h>
431da177e4SLinus Torvalds #include <linux/inet.h>
441da177e4SLinus Torvalds #include <linux/slab.h>
45de960aa9SFlorian Westphal #include <linux/tcp.h>
46de960aa9SFlorian Westphal #include <linux/udp.h>
4790017accSMarcelo Ricardo Leitner #include <linux/sctp.h>
481da177e4SLinus Torvalds #include <linux/netdevice.h>
491da177e4SLinus Torvalds #ifdef CONFIG_NET_CLS_ACT
501da177e4SLinus Torvalds #include <net/pkt_sched.h>
511da177e4SLinus Torvalds #endif
521da177e4SLinus Torvalds #include <linux/string.h>
531da177e4SLinus Torvalds #include <linux/skbuff.h>
549c55e01cSJens Axboe #include <linux/splice.h>
551da177e4SLinus Torvalds #include <linux/cache.h>
561da177e4SLinus Torvalds #include <linux/rtnetlink.h>
571da177e4SLinus Torvalds #include <linux/init.h>
58716ea3a7SDavid Howells #include <linux/scatterlist.h>
59ac45f602SPatrick Ohly #include <linux/errqueue.h>
60268bb0ceSLinus Torvalds #include <linux/prefetch.h>
610d5501c1SVlad Yasevich #include <linux/if_vlan.h>
622a2ea508SJohn Hurley #include <linux/mpls.h>
631da177e4SLinus Torvalds 
641da177e4SLinus Torvalds #include <net/protocol.h>
651da177e4SLinus Torvalds #include <net/dst.h>
661da177e4SLinus Torvalds #include <net/sock.h>
671da177e4SLinus Torvalds #include <net/checksum.h>
68ed1f50c3SPaul Durrant #include <net/ip6_checksum.h>
691da177e4SLinus Torvalds #include <net/xfrm.h>
708822e270SJohn Hurley #include <net/mpls.h>
713ee17bc7SMat Martineau #include <net/mptcp.h>
721da177e4SLinus Torvalds 
737c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
74ad8d75ffSSteven Rostedt #include <trace/events/skb.h>
7551c56b00SEric Dumazet #include <linux/highmem.h>
76b245be1fSWillem de Bruijn #include <linux/capability.h>
77b245be1fSWillem de Bruijn #include <linux/user_namespace.h>
782544af03SMatteo Croce #include <linux/indirect_call_wrapper.h>
79a1f8e7f7SAl Viro 
807b7ed885SBart Van Assche #include "datagram.h"
817b7ed885SBart Van Assche 
8208009a76SAlexey Dobriyan struct kmem_cache *skbuff_head_cache __ro_after_init;
8308009a76SAlexey Dobriyan static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
84df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
85df5042f4SFlorian Westphal static struct kmem_cache *skbuff_ext_cache __ro_after_init;
86df5042f4SFlorian Westphal #endif
875f74f82eSHans Westgaard Ry int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
885f74f82eSHans Westgaard Ry EXPORT_SYMBOL(sysctl_max_skb_frags);
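/*
 * A sketch of the locking rule from the NOTE at the top of this file: the
 * unlocked __skb_queue_tail() is only safe when the list is otherwise
 * protected, for example by taking the queue lock as below (which is what
 * the locked skb_queue_tail() variant does internally).  "list" and "skb"
 * are assumed to be valid here.
 */
static inline void example_queue_with_lock(struct sk_buff_head *list,
					   struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, skb);	/* list is protected here */
	spin_unlock_irqrestore(&list->lock, flags);
}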
891da177e4SLinus Torvalds 
901da177e4SLinus Torvalds /**
91f05de73bSJean Sacren  *	skb_panic - private function for out-of-line support
921da177e4SLinus Torvalds  *	@skb:	buffer
931da177e4SLinus Torvalds  *	@sz:	size
94f05de73bSJean Sacren  *	@addr:	address
9599d5851eSJames Hogan  *	@msg:	skb_over_panic or skb_under_panic
961da177e4SLinus Torvalds  *
97f05de73bSJean Sacren  *	Out-of-line support for skb_put() and skb_push().
98f05de73bSJean Sacren  *	Called via the wrapper skb_over_panic() or skb_under_panic().
99f05de73bSJean Sacren  *	Keep out of line to prevent kernel bloat.
100f05de73bSJean Sacren  *	__builtin_return_address is not used because it is not always reliable.
1011da177e4SLinus Torvalds  */
102f05de73bSJean Sacren static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
10399d5851eSJames Hogan 		      const char msg[])
1041da177e4SLinus Torvalds {
10541a46913SJesper Dangaard Brouer 	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
10699d5851eSJames Hogan 		 msg, addr, skb->len, sz, skb->head, skb->data,
1074305b541SArnaldo Carvalho de Melo 		 (unsigned long)skb->tail, (unsigned long)skb->end,
10826095455SPatrick McHardy 		 skb->dev ? skb->dev->name : "<NULL>");
1091da177e4SLinus Torvalds 	BUG();
1101da177e4SLinus Torvalds }
1111da177e4SLinus Torvalds 
112f05de73bSJean Sacren static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
1131da177e4SLinus Torvalds {
114f05de73bSJean Sacren 	skb_panic(skb, sz, addr, __func__);
1151da177e4SLinus Torvalds }
1161da177e4SLinus Torvalds 
117f05de73bSJean Sacren static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
118f05de73bSJean Sacren {
119f05de73bSJean Sacren 	skb_panic(skb, sz, addr, __func__);
120f05de73bSJean Sacren }
121c93bdd0eSMel Gorman 
122c93bdd0eSMel Gorman /*
123c93bdd0eSMel Gorman  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
124c93bdd0eSMel Gorman  * the caller if emergency pfmemalloc reserves are being used. If they are,
125c93bdd0eSMel Gorman  * and the socket is later found to be SOCK_MEMALLOC, then PFMEMALLOC reserves
126c93bdd0eSMel Gorman  * may be used. Otherwise, the packet data may be discarded until enough
127c93bdd0eSMel Gorman  * memory is free.
128c93bdd0eSMel Gorman  */
129c93bdd0eSMel Gorman #define kmalloc_reserve(size, gfp, node, pfmemalloc) \
130c93bdd0eSMel Gorman 	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
13161c5e88aSstephen hemminger 
13261c5e88aSstephen hemminger static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
13361c5e88aSstephen hemminger 			       unsigned long ip, bool *pfmemalloc)
134c93bdd0eSMel Gorman {
135c93bdd0eSMel Gorman 	void *obj;
136c93bdd0eSMel Gorman 	bool ret_pfmemalloc = false;
137c93bdd0eSMel Gorman 
138c93bdd0eSMel Gorman 	/*
139c93bdd0eSMel Gorman 	 * Try a regular allocation; if that fails and we're not entitled
140c93bdd0eSMel Gorman 	 * to the reserves, fail.
141c93bdd0eSMel Gorman 	 */
142c93bdd0eSMel Gorman 	obj = kmalloc_node_track_caller(size,
143c93bdd0eSMel Gorman 					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
144c93bdd0eSMel Gorman 					node);
145c93bdd0eSMel Gorman 	if (obj || !(gfp_pfmemalloc_allowed(flags)))
146c93bdd0eSMel Gorman 		goto out;
147c93bdd0eSMel Gorman 
148c93bdd0eSMel Gorman 	/* Try again but now we are using pfmemalloc reserves */
149c93bdd0eSMel Gorman 	ret_pfmemalloc = true;
150c93bdd0eSMel Gorman 	obj = kmalloc_node_track_caller(size, flags, node);
151c93bdd0eSMel Gorman 
152c93bdd0eSMel Gorman out:
153c93bdd0eSMel Gorman 	if (pfmemalloc)
154c93bdd0eSMel Gorman 		*pfmemalloc = ret_pfmemalloc;
155c93bdd0eSMel Gorman 
156c93bdd0eSMel Gorman 	return obj;
157c93bdd0eSMel Gorman }
158c93bdd0eSMel Gorman 
1591da177e4SLinus Torvalds /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
1601da177e4SLinus Torvalds  *	'private' fields and also do memory statistics to find all the
1611da177e4SLinus Torvalds  *	[BEEP] leaks.
1621da177e4SLinus Torvalds  *
1631da177e4SLinus Torvalds  */
1641da177e4SLinus Torvalds 
1651da177e4SLinus Torvalds /**
166d179cd12SDavid S. Miller  *	__alloc_skb	-	allocate a network buffer
1671da177e4SLinus Torvalds  *	@size: size to allocate
1681da177e4SLinus Torvalds  *	@gfp_mask: allocation mask
169c93bdd0eSMel Gorman  *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
170c93bdd0eSMel Gorman  *		instead of head cache and allocate a cloned (child) skb.
171c93bdd0eSMel Gorman  *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
172c93bdd0eSMel Gorman  *		allocations in case the data is required for writeback
173b30973f8SChristoph Hellwig  *	@node: numa node to allocate memory on
1741da177e4SLinus Torvalds  *
1751da177e4SLinus Torvalds  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
17694b6042cSBen Hutchings  *	tail room of at least size bytes. The object has a reference count
17794b6042cSBen Hutchings  *	of one. The return is the buffer. On a failure the return is %NULL.
1781da177e4SLinus Torvalds  *
1791da177e4SLinus Torvalds  *	Buffers may only be allocated from interrupts using a @gfp_mask of
1801da177e4SLinus Torvalds  *	%GFP_ATOMIC.
1811da177e4SLinus Torvalds  */
182dd0fc66fSAl Viro struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
183c93bdd0eSMel Gorman 			    int flags, int node)
1841da177e4SLinus Torvalds {
185e18b890bSChristoph Lameter 	struct kmem_cache *cache;
1864947d3efSBenjamin LaHaise 	struct skb_shared_info *shinfo;
1871da177e4SLinus Torvalds 	struct sk_buff *skb;
1881da177e4SLinus Torvalds 	u8 *data;
189c93bdd0eSMel Gorman 	bool pfmemalloc;
1901da177e4SLinus Torvalds 
191c93bdd0eSMel Gorman 	cache = (flags & SKB_ALLOC_FCLONE)
192c93bdd0eSMel Gorman 		? skbuff_fclone_cache : skbuff_head_cache;
193c93bdd0eSMel Gorman 
194c93bdd0eSMel Gorman 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
195c93bdd0eSMel Gorman 		gfp_mask |= __GFP_MEMALLOC;
1968798b3fbSHerbert Xu 
1971da177e4SLinus Torvalds 	/* Get the HEAD */
198b30973f8SChristoph Hellwig 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
1991da177e4SLinus Torvalds 	if (!skb)
2001da177e4SLinus Torvalds 		goto out;
201ec7d2f2cSEric Dumazet 	prefetchw(skb);
2021da177e4SLinus Torvalds 
20387fb4b7bSEric Dumazet 	/* We do our best to align skb_shared_info on a separate cache
20487fb4b7bSEric Dumazet 	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
20587fb4b7bSEric Dumazet 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
20687fb4b7bSEric Dumazet 	 * Both skb->head and skb_shared_info are cache line aligned.
20787fb4b7bSEric Dumazet 	 */
208bc417e30STony Lindgren 	size = SKB_DATA_ALIGN(size);
20987fb4b7bSEric Dumazet 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
210c93bdd0eSMel Gorman 	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
2111da177e4SLinus Torvalds 	if (!data)
2121da177e4SLinus Torvalds 		goto nodata;
21387fb4b7bSEric Dumazet 	/* kmalloc(size) might give us more room than requested.
21487fb4b7bSEric Dumazet 	 * Put skb_shared_info exactly at the end of allocated zone,
21587fb4b7bSEric Dumazet 	 * to allow max possible filling before reallocation.
21687fb4b7bSEric Dumazet 	 */
21787fb4b7bSEric Dumazet 	size = SKB_WITH_OVERHEAD(ksize(data));
218ec7d2f2cSEric Dumazet 	prefetchw(data + size);
2191da177e4SLinus Torvalds 
220ca0605a7SArnaldo Carvalho de Melo 	/*
221c8005785SJohannes Berg 	 * Only clear those fields we need to clear, not those that we will
222c8005785SJohannes Berg 	 * actually initialise below. Hence, don't put any more fields after
223c8005785SJohannes Berg 	 * the tail pointer in struct sk_buff!
224ca0605a7SArnaldo Carvalho de Melo 	 */
225ca0605a7SArnaldo Carvalho de Melo 	memset(skb, 0, offsetof(struct sk_buff, tail));
22687fb4b7bSEric Dumazet 	/* Account for allocated memory : skb + skb->head */
22787fb4b7bSEric Dumazet 	skb->truesize = SKB_TRUESIZE(size);
228c93bdd0eSMel Gorman 	skb->pfmemalloc = pfmemalloc;
22963354797SReshetova, Elena 	refcount_set(&skb->users, 1);
2301da177e4SLinus Torvalds 	skb->head = data;
2311da177e4SLinus Torvalds 	skb->data = data;
23227a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
2334305b541SArnaldo Carvalho de Melo 	skb->end = skb->tail + size;
23435d04610SCong Wang 	skb->mac_header = (typeof(skb->mac_header))~0U;
23535d04610SCong Wang 	skb->transport_header = (typeof(skb->transport_header))~0U;
23619633e12SStephen Hemminger 
2374947d3efSBenjamin LaHaise 	/* make sure we initialize shinfo sequentially */
2384947d3efSBenjamin LaHaise 	shinfo = skb_shinfo(skb);
239ec7d2f2cSEric Dumazet 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
2404947d3efSBenjamin LaHaise 	atomic_set(&shinfo->dataref, 1);
2414947d3efSBenjamin LaHaise 
242c93bdd0eSMel Gorman 	if (flags & SKB_ALLOC_FCLONE) {
243d0bf4a9eSEric Dumazet 		struct sk_buff_fclones *fclones;
2441da177e4SLinus Torvalds 
245d0bf4a9eSEric Dumazet 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
246d0bf4a9eSEric Dumazet 
247d179cd12SDavid S. Miller 		skb->fclone = SKB_FCLONE_ORIG;
2482638595aSReshetova, Elena 		refcount_set(&fclones->fclone_ref, 1);
249d179cd12SDavid S. Miller 
2506ffe75ebSEric Dumazet 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
251d179cd12SDavid S. Miller 	}
2526370cc3bSAleksandr Nogikh 
2536370cc3bSAleksandr Nogikh 	skb_set_kcov_handle(skb, kcov_common_handle());
2546370cc3bSAleksandr Nogikh 
2551da177e4SLinus Torvalds out:
2561da177e4SLinus Torvalds 	return skb;
2571da177e4SLinus Torvalds nodata:
2588798b3fbSHerbert Xu 	kmem_cache_free(cache, skb);
2591da177e4SLinus Torvalds 	skb = NULL;
2601da177e4SLinus Torvalds 	goto out;
2611da177e4SLinus Torvalds }
262b4ac530fSDavid S. Miller EXPORT_SYMBOL(__alloc_skb);
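/*
 * A minimal usage sketch of the allocator above, via the alloc_skb()
 * wrapper: reserve some headroom for lower layers, then append a payload.
 * The function name, the NET_SKB_PAD headroom choice and the
 * "payload"/"len" parameters are illustrative assumptions.
 */
static inline struct sk_buff *example_alloc_and_fill(const void *payload,
						     unsigned int len)
{
	struct sk_buff *skb = alloc_skb(NET_SKB_PAD + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, NET_SKB_PAD);		/* headroom, reduces tailroom */
	skb_put_data(skb, payload, len);	/* appends data, updates skb->len */
	return skb;
}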
2631da177e4SLinus Torvalds 
264ba0509b6SJesper Dangaard Brouer /* Caller must provide SKB that is memset cleared */
265ba0509b6SJesper Dangaard Brouer static struct sk_buff *__build_skb_around(struct sk_buff *skb,
266ba0509b6SJesper Dangaard Brouer 					  void *data, unsigned int frag_size)
267ba0509b6SJesper Dangaard Brouer {
268ba0509b6SJesper Dangaard Brouer 	struct skb_shared_info *shinfo;
269ba0509b6SJesper Dangaard Brouer 	unsigned int size = frag_size ? : ksize(data);
270ba0509b6SJesper Dangaard Brouer 
271ba0509b6SJesper Dangaard Brouer 	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
272ba0509b6SJesper Dangaard Brouer 
273ba0509b6SJesper Dangaard Brouer 	/* Assumes caller memset cleared SKB */
274ba0509b6SJesper Dangaard Brouer 	skb->truesize = SKB_TRUESIZE(size);
275ba0509b6SJesper Dangaard Brouer 	refcount_set(&skb->users, 1);
276ba0509b6SJesper Dangaard Brouer 	skb->head = data;
277ba0509b6SJesper Dangaard Brouer 	skb->data = data;
278ba0509b6SJesper Dangaard Brouer 	skb_reset_tail_pointer(skb);
279ba0509b6SJesper Dangaard Brouer 	skb->end = skb->tail + size;
280ba0509b6SJesper Dangaard Brouer 	skb->mac_header = (typeof(skb->mac_header))~0U;
281ba0509b6SJesper Dangaard Brouer 	skb->transport_header = (typeof(skb->transport_header))~0U;
282ba0509b6SJesper Dangaard Brouer 
283ba0509b6SJesper Dangaard Brouer 	/* make sure we initialize shinfo sequentially */
284ba0509b6SJesper Dangaard Brouer 	shinfo = skb_shinfo(skb);
285ba0509b6SJesper Dangaard Brouer 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
286ba0509b6SJesper Dangaard Brouer 	atomic_set(&shinfo->dataref, 1);
287ba0509b6SJesper Dangaard Brouer 
2886370cc3bSAleksandr Nogikh 	skb_set_kcov_handle(skb, kcov_common_handle());
2896370cc3bSAleksandr Nogikh 
290ba0509b6SJesper Dangaard Brouer 	return skb;
291ba0509b6SJesper Dangaard Brouer }
292ba0509b6SJesper Dangaard Brouer 
2931da177e4SLinus Torvalds /**
2942ea2f62cSEric Dumazet  * __build_skb - build a network buffer
295b2b5ce9dSEric Dumazet  * @data: data buffer provided by caller
2962ea2f62cSEric Dumazet  * @frag_size: size of data, or 0 if head was kmalloced
297b2b5ce9dSEric Dumazet  *
298b2b5ce9dSEric Dumazet  * Allocate a new &sk_buff. Caller provides space holding head and
299deceb4c0SFlorian Fainelli  * skb_shared_info. @data must have been allocated by kmalloc() only if
3002ea2f62cSEric Dumazet  * @frag_size is 0, otherwise data should come from the page allocator
3012ea2f62cSEric Dumazet  * or vmalloc().
302b2b5ce9dSEric Dumazet  * The return value is the new skb buffer.
303b2b5ce9dSEric Dumazet  * On a failure the return is %NULL, and @data is not freed.
304b2b5ce9dSEric Dumazet  * Notes :
305b2b5ce9dSEric Dumazet  *  Before IO, the driver allocates only the data buffer where the NIC puts
306b2b5ce9dSEric Dumazet  *  the incoming frame. The driver should add room at the head (NET_SKB_PAD)
307b2b5ce9dSEric Dumazet  *  and MUST add room at the tail (SKB_DATA_ALIGN(skb_shared_info)).
308b2b5ce9dSEric Dumazet  *  After IO, the driver calls build_skb() to allocate the sk_buff and
309b2b5ce9dSEric Dumazet  *  populate it before giving the packet to the stack.
310b2b5ce9dSEric Dumazet  *  RX rings only contain data buffers, not full skbs.
311b2b5ce9dSEric Dumazet  */
3122ea2f62cSEric Dumazet struct sk_buff *__build_skb(void *data, unsigned int frag_size)
313b2b5ce9dSEric Dumazet {
314b2b5ce9dSEric Dumazet 	struct sk_buff *skb;
315b2b5ce9dSEric Dumazet 
316b2b5ce9dSEric Dumazet 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
317ba0509b6SJesper Dangaard Brouer 	if (unlikely(!skb))
318b2b5ce9dSEric Dumazet 		return NULL;
319b2b5ce9dSEric Dumazet 
320b2b5ce9dSEric Dumazet 	memset(skb, 0, offsetof(struct sk_buff, tail));
321b2b5ce9dSEric Dumazet 
322ba0509b6SJesper Dangaard Brouer 	return __build_skb_around(skb, data, frag_size);
323b2b5ce9dSEric Dumazet }
3242ea2f62cSEric Dumazet 
3252ea2f62cSEric Dumazet /* build_skb() is a wrapper over __build_skb() that additionally
3262ea2f62cSEric Dumazet  * takes care of skb->head_frag and skb->pfmemalloc.
3272ea2f62cSEric Dumazet  * This means that if @frag_size is not zero, then @data must be backed
3282ea2f62cSEric Dumazet  * by a page fragment, not kmalloc() or vmalloc().
3292ea2f62cSEric Dumazet  */
3302ea2f62cSEric Dumazet struct sk_buff *build_skb(void *data, unsigned int frag_size)
3312ea2f62cSEric Dumazet {
3322ea2f62cSEric Dumazet 	struct sk_buff *skb = __build_skb(data, frag_size);
3332ea2f62cSEric Dumazet 
3342ea2f62cSEric Dumazet 	if (skb && frag_size) {
3352ea2f62cSEric Dumazet 		skb->head_frag = 1;
3362f064f34SMichal Hocko 		if (page_is_pfmemalloc(virt_to_head_page(data)))
3372ea2f62cSEric Dumazet 			skb->pfmemalloc = 1;
3382ea2f62cSEric Dumazet 	}
3392ea2f62cSEric Dumazet 	return skb;
3402ea2f62cSEric Dumazet }
341b2b5ce9dSEric Dumazet EXPORT_SYMBOL(build_skb);
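/*
 * A sketch of the RX pattern described in the notes above: the driver
 * DMAs the frame into a previously allocated page fragment and only then
 * builds the sk_buff.  "frag", "frag_size" (the full fragment size,
 * including room for skb_shared_info) and "pkt_len" are assumed to come
 * from a hypothetical driver's receive ring.
 */
static inline struct sk_buff *example_build_rx_skb(void *frag,
						   unsigned int frag_size,
						   unsigned int pkt_len)
{
	struct sk_buff *skb = build_skb(frag, frag_size);

	if (!skb)
		return NULL;
	skb_reserve(skb, NET_SKB_PAD);	/* headroom reserved before DMA */
	skb_put(skb, pkt_len);		/* frame length reported by the NIC */
	return skb;
}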
342b2b5ce9dSEric Dumazet 
343ba0509b6SJesper Dangaard Brouer /**
344ba0509b6SJesper Dangaard Brouer  * build_skb_around - build a network buffer around provided skb
345ba0509b6SJesper Dangaard Brouer  * @skb: sk_buff provided by the caller, must be memset cleared
346ba0509b6SJesper Dangaard Brouer  * @data: data buffer provided by caller
347ba0509b6SJesper Dangaard Brouer  * @frag_size: size of data, or 0 if head was kmalloced
348ba0509b6SJesper Dangaard Brouer  */
349ba0509b6SJesper Dangaard Brouer struct sk_buff *build_skb_around(struct sk_buff *skb,
350ba0509b6SJesper Dangaard Brouer 				 void *data, unsigned int frag_size)
351ba0509b6SJesper Dangaard Brouer {
352ba0509b6SJesper Dangaard Brouer 	if (unlikely(!skb))
353ba0509b6SJesper Dangaard Brouer 		return NULL;
354ba0509b6SJesper Dangaard Brouer 
355ba0509b6SJesper Dangaard Brouer 	skb = __build_skb_around(skb, data, frag_size);
356ba0509b6SJesper Dangaard Brouer 
357ba0509b6SJesper Dangaard Brouer 	if (skb && frag_size) {
358ba0509b6SJesper Dangaard Brouer 		skb->head_frag = 1;
359ba0509b6SJesper Dangaard Brouer 		if (page_is_pfmemalloc(virt_to_head_page(data)))
360ba0509b6SJesper Dangaard Brouer 			skb->pfmemalloc = 1;
361ba0509b6SJesper Dangaard Brouer 	}
362ba0509b6SJesper Dangaard Brouer 	return skb;
363ba0509b6SJesper Dangaard Brouer }
364ba0509b6SJesper Dangaard Brouer EXPORT_SYMBOL(build_skb_around);
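/*
 * A sketch of build_skb_around(): unlike build_skb(), the sk_buff shell
 * comes from the caller (for example a bulk-allocated array) and must
 * already be cleared up to the tail member.  All parameters here are
 * assumptions about a hypothetical caller.
 */
static inline struct sk_buff *example_build_around(struct sk_buff *skb,
						   void *frag,
						   unsigned int frag_size)
{
	memset(skb, 0, offsetof(struct sk_buff, tail));	/* caller's duty */
	return build_skb_around(skb, frag, frag_size);
}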
365ba0509b6SJesper Dangaard Brouer 
366795bb1c0SJesper Dangaard Brouer #define NAPI_SKB_CACHE_SIZE	64
367795bb1c0SJesper Dangaard Brouer 
368795bb1c0SJesper Dangaard Brouer struct napi_alloc_cache {
369795bb1c0SJesper Dangaard Brouer 	struct page_frag_cache page;
370e0d7924aSAlexey Dobriyan 	unsigned int skb_count;
371795bb1c0SJesper Dangaard Brouer 	void *skb_cache[NAPI_SKB_CACHE_SIZE];
372795bb1c0SJesper Dangaard Brouer };
373795bb1c0SJesper Dangaard Brouer 
374b63ae8caSAlexander Duyck static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
375795bb1c0SJesper Dangaard Brouer static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
376ffde7328SAlexander Duyck 
377ffde7328SAlexander Duyck static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
378ffde7328SAlexander Duyck {
379795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
3809451980aSAlexander Duyck 
3818c2dd3e4SAlexander Duyck 	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
382ffde7328SAlexander Duyck }
383ffde7328SAlexander Duyck 
384ffde7328SAlexander Duyck void *napi_alloc_frag(unsigned int fragsz)
385ffde7328SAlexander Duyck {
3863bed3cc4SAlexander Duyck 	fragsz = SKB_DATA_ALIGN(fragsz);
3873bed3cc4SAlexander Duyck 
388453f85d4SMel Gorman 	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
389ffde7328SAlexander Duyck }
390ffde7328SAlexander Duyck EXPORT_SYMBOL(napi_alloc_frag);
391ffde7328SAlexander Duyck 
3926f532612SEric Dumazet /**
3937ba7aeabSSebastian Andrzej Siewior  * netdev_alloc_frag - allocate a page fragment
3947ba7aeabSSebastian Andrzej Siewior  * @fragsz: fragment size
3957ba7aeabSSebastian Andrzej Siewior  *
3967ba7aeabSSebastian Andrzej Siewior  * Allocates a frag from a page for receive buffer.
3977ba7aeabSSebastian Andrzej Siewior  * Uses GFP_ATOMIC allocations.
3987ba7aeabSSebastian Andrzej Siewior  */
3997ba7aeabSSebastian Andrzej Siewior void *netdev_alloc_frag(unsigned int fragsz)
4007ba7aeabSSebastian Andrzej Siewior {
4017ba7aeabSSebastian Andrzej Siewior 	struct page_frag_cache *nc;
4027ba7aeabSSebastian Andrzej Siewior 	void *data;
4037ba7aeabSSebastian Andrzej Siewior 
4047ba7aeabSSebastian Andrzej Siewior 	fragsz = SKB_DATA_ALIGN(fragsz);
4057ba7aeabSSebastian Andrzej Siewior 	if (in_irq() || irqs_disabled()) {
4067ba7aeabSSebastian Andrzej Siewior 		nc = this_cpu_ptr(&netdev_alloc_cache);
4077ba7aeabSSebastian Andrzej Siewior 		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
4087ba7aeabSSebastian Andrzej Siewior 	} else {
4097ba7aeabSSebastian Andrzej Siewior 		local_bh_disable();
4107ba7aeabSSebastian Andrzej Siewior 		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
4117ba7aeabSSebastian Andrzej Siewior 		local_bh_enable();
4127ba7aeabSSebastian Andrzej Siewior 	}
4137ba7aeabSSebastian Andrzej Siewior 	return data;
4147ba7aeabSSebastian Andrzej Siewior }
4157ba7aeabSSebastian Andrzej Siewior EXPORT_SYMBOL(netdev_alloc_frag);
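/*
 * A usage sketch for the helper above: an assumed driver reserving one
 * receive buffer per descriptor.  "buf_len" (headroom plus MTU) is a
 * hypothetical parameter; the fragment is later released with
 * skb_free_frag() or handed to build_skb().
 */
static inline void *example_alloc_rx_frag(unsigned int buf_len)
{
	/* leave room for the frame and the trailing struct skb_shared_info */
	unsigned int fragsz = SKB_DATA_ALIGN(buf_len) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return netdev_alloc_frag(fragsz);
}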
4167ba7aeabSSebastian Andrzej Siewior 
4177ba7aeabSSebastian Andrzej Siewior /**
418fd11a83dSAlexander Duyck  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
419fd11a83dSAlexander Duyck  *	@dev: network device to receive on
420d7499160SMasanari Iida  *	@len: length to allocate
421fd11a83dSAlexander Duyck  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
422fd11a83dSAlexander Duyck  *
423fd11a83dSAlexander Duyck  *	Allocate a new &sk_buff and assign it a usage count of one. The
424fd11a83dSAlexander Duyck  *	buffer has NET_SKB_PAD headroom built in. Users should allocate
425fd11a83dSAlexander Duyck  *	the headroom they think they need without accounting for the
426fd11a83dSAlexander Duyck  *	built in space. The built in space is used for optimisations.
427fd11a83dSAlexander Duyck  *
428fd11a83dSAlexander Duyck  *	%NULL is returned if there is no free memory.
429fd11a83dSAlexander Duyck  */
4309451980aSAlexander Duyck struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
4319451980aSAlexander Duyck 				   gfp_t gfp_mask)
432fd11a83dSAlexander Duyck {
433b63ae8caSAlexander Duyck 	struct page_frag_cache *nc;
434fd11a83dSAlexander Duyck 	struct sk_buff *skb;
4359451980aSAlexander Duyck 	bool pfmemalloc;
4369451980aSAlexander Duyck 	void *data;
437fd11a83dSAlexander Duyck 
4389451980aSAlexander Duyck 	len += NET_SKB_PAD;
439fd11a83dSAlexander Duyck 
4409451980aSAlexander Duyck 	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
441d0164adcSMel Gorman 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
442a080e7bdSAlexander Duyck 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
443a080e7bdSAlexander Duyck 		if (!skb)
444a080e7bdSAlexander Duyck 			goto skb_fail;
445a080e7bdSAlexander Duyck 		goto skb_success;
446a080e7bdSAlexander Duyck 	}
4479451980aSAlexander Duyck 
4489451980aSAlexander Duyck 	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4499451980aSAlexander Duyck 	len = SKB_DATA_ALIGN(len);
4509451980aSAlexander Duyck 
4519451980aSAlexander Duyck 	if (sk_memalloc_socks())
4529451980aSAlexander Duyck 		gfp_mask |= __GFP_MEMALLOC;
4539451980aSAlexander Duyck 
45492dcabd7SSebastian Andrzej Siewior 	if (in_irq() || irqs_disabled()) {
4559451980aSAlexander Duyck 		nc = this_cpu_ptr(&netdev_alloc_cache);
4568c2dd3e4SAlexander Duyck 		data = page_frag_alloc(nc, len, gfp_mask);
4579451980aSAlexander Duyck 		pfmemalloc = nc->pfmemalloc;
45892dcabd7SSebastian Andrzej Siewior 	} else {
45992dcabd7SSebastian Andrzej Siewior 		local_bh_disable();
46092dcabd7SSebastian Andrzej Siewior 		nc = this_cpu_ptr(&napi_alloc_cache.page);
46192dcabd7SSebastian Andrzej Siewior 		data = page_frag_alloc(nc, len, gfp_mask);
46292dcabd7SSebastian Andrzej Siewior 		pfmemalloc = nc->pfmemalloc;
46392dcabd7SSebastian Andrzej Siewior 		local_bh_enable();
46492dcabd7SSebastian Andrzej Siewior 	}
4659451980aSAlexander Duyck 
4669451980aSAlexander Duyck 	if (unlikely(!data))
4679451980aSAlexander Duyck 		return NULL;
4689451980aSAlexander Duyck 
4699451980aSAlexander Duyck 	skb = __build_skb(data, len);
4709451980aSAlexander Duyck 	if (unlikely(!skb)) {
471181edb2bSAlexander Duyck 		skb_free_frag(data);
4729451980aSAlexander Duyck 		return NULL;
4739451980aSAlexander Duyck 	}
4749451980aSAlexander Duyck 
4759451980aSAlexander Duyck 	if (pfmemalloc)
4769451980aSAlexander Duyck 		skb->pfmemalloc = 1;
4779451980aSAlexander Duyck 	skb->head_frag = 1;
4789451980aSAlexander Duyck 
479a080e7bdSAlexander Duyck skb_success:
4808af27456SChristoph Hellwig 	skb_reserve(skb, NET_SKB_PAD);
4817b2e497aSChristoph Hellwig 	skb->dev = dev;
482fd11a83dSAlexander Duyck 
483a080e7bdSAlexander Duyck skb_fail:
4848af27456SChristoph Hellwig 	return skb;
4858af27456SChristoph Hellwig }
486b4ac530fSDavid S. Miller EXPORT_SYMBOL(__netdev_alloc_skb);
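/*
 * A usage sketch for the allocator above via its netdev_alloc_skb()
 * wrapper (GFP_ATOMIC), as a hypothetical non-NAPI receive path might do.
 * NET_IP_ALIGN headroom is reserved on top of the built-in NET_SKB_PAD.
 */
static inline struct sk_buff *example_netdev_rx_alloc(struct net_device *dev,
						      unsigned int frame_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN);

	if (!skb)
		return NULL;
	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}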
4871da177e4SLinus Torvalds 
488fd11a83dSAlexander Duyck /**
489fd11a83dSAlexander Duyck  *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
490fd11a83dSAlexander Duyck  *	@napi: napi instance this buffer was allocated for
491d7499160SMasanari Iida  *	@len: length to allocate
492fd11a83dSAlexander Duyck  *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
493fd11a83dSAlexander Duyck  *
494fd11a83dSAlexander Duyck  *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
495fd11a83dSAlexander Duyck  *	attempt to allocate the head from a special reserved region used
496fd11a83dSAlexander Duyck  *	only for NAPI Rx allocation.  By doing this we can save several
497fd11a83dSAlexander Duyck  *	CPU cycles by avoiding having to disable and re-enable IRQs.
498fd11a83dSAlexander Duyck  *
499fd11a83dSAlexander Duyck  *	%NULL is returned if there is no free memory.
500fd11a83dSAlexander Duyck  */
5019451980aSAlexander Duyck struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
5029451980aSAlexander Duyck 				 gfp_t gfp_mask)
503fd11a83dSAlexander Duyck {
504795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
505fd11a83dSAlexander Duyck 	struct sk_buff *skb;
5069451980aSAlexander Duyck 	void *data;
507fd11a83dSAlexander Duyck 
5089451980aSAlexander Duyck 	len += NET_SKB_PAD + NET_IP_ALIGN;
509fd11a83dSAlexander Duyck 
5109451980aSAlexander Duyck 	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
511d0164adcSMel Gorman 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
512a080e7bdSAlexander Duyck 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
513a080e7bdSAlexander Duyck 		if (!skb)
514a080e7bdSAlexander Duyck 			goto skb_fail;
515a080e7bdSAlexander Duyck 		goto skb_success;
516a080e7bdSAlexander Duyck 	}
5179451980aSAlexander Duyck 
5189451980aSAlexander Duyck 	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5199451980aSAlexander Duyck 	len = SKB_DATA_ALIGN(len);
5209451980aSAlexander Duyck 
5219451980aSAlexander Duyck 	if (sk_memalloc_socks())
5229451980aSAlexander Duyck 		gfp_mask |= __GFP_MEMALLOC;
5239451980aSAlexander Duyck 
5248c2dd3e4SAlexander Duyck 	data = page_frag_alloc(&nc->page, len, gfp_mask);
5259451980aSAlexander Duyck 	if (unlikely(!data))
5269451980aSAlexander Duyck 		return NULL;
5279451980aSAlexander Duyck 
5289451980aSAlexander Duyck 	skb = __build_skb(data, len);
5299451980aSAlexander Duyck 	if (unlikely(!skb)) {
530181edb2bSAlexander Duyck 		skb_free_frag(data);
5319451980aSAlexander Duyck 		return NULL;
5329451980aSAlexander Duyck 	}
5339451980aSAlexander Duyck 
534795bb1c0SJesper Dangaard Brouer 	if (nc->page.pfmemalloc)
5359451980aSAlexander Duyck 		skb->pfmemalloc = 1;
5369451980aSAlexander Duyck 	skb->head_frag = 1;
5379451980aSAlexander Duyck 
538a080e7bdSAlexander Duyck skb_success:
539fd11a83dSAlexander Duyck 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
540fd11a83dSAlexander Duyck 	skb->dev = napi->dev;
541fd11a83dSAlexander Duyck 
542a080e7bdSAlexander Duyck skb_fail:
543fd11a83dSAlexander Duyck 	return skb;
544fd11a83dSAlexander Duyck }
545fd11a83dSAlexander Duyck EXPORT_SYMBOL(__napi_alloc_skb);
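/*
 * A sketch of the "copybreak" use of the allocator above via its
 * napi_alloc_skb() wrapper: small frames are copied into a fresh skb built
 * from the per-CPU NAPI page fragment cache.  "rx_buf" and "frame_len"
 * are assumed to come from a hypothetical driver's receive descriptor.
 */
static inline struct sk_buff *example_napi_copybreak(struct napi_struct *napi,
						     const void *rx_buf,
						     unsigned int frame_len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, frame_len);

	if (!skb)
		return NULL;
	skb_put_data(skb, rx_buf, frame_len);	/* copy the small frame */
	return skb;
}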
546fd11a83dSAlexander Duyck 
547654bed16SPeter Zijlstra void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
54850269e19SEric Dumazet 		     int size, unsigned int truesize)
549654bed16SPeter Zijlstra {
550654bed16SPeter Zijlstra 	skb_fill_page_desc(skb, i, page, off, size);
551654bed16SPeter Zijlstra 	skb->len += size;
552654bed16SPeter Zijlstra 	skb->data_len += size;
55350269e19SEric Dumazet 	skb->truesize += truesize;
554654bed16SPeter Zijlstra }
555654bed16SPeter Zijlstra EXPORT_SYMBOL(skb_add_rx_frag);
556654bed16SPeter Zijlstra 
557f8e617e1SJason Wang void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
558f8e617e1SJason Wang 			  unsigned int truesize)
559f8e617e1SJason Wang {
560f8e617e1SJason Wang 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
561f8e617e1SJason Wang 
562f8e617e1SJason Wang 	skb_frag_size_add(frag, size);
563f8e617e1SJason Wang 	skb->len += size;
564f8e617e1SJason Wang 	skb->data_len += size;
565f8e617e1SJason Wang 	skb->truesize += truesize;
566f8e617e1SJason Wang }
567f8e617e1SJason Wang EXPORT_SYMBOL(skb_coalesce_rx_frag);
568f8e617e1SJason Wang 
56927b437c8SHerbert Xu static void skb_drop_list(struct sk_buff **listp)
5701da177e4SLinus Torvalds {
571bd8a7036SEric Dumazet 	kfree_skb_list(*listp);
57227b437c8SHerbert Xu 	*listp = NULL;
5731da177e4SLinus Torvalds }
5741da177e4SLinus Torvalds 
57527b437c8SHerbert Xu static inline void skb_drop_fraglist(struct sk_buff *skb)
57627b437c8SHerbert Xu {
57727b437c8SHerbert Xu 	skb_drop_list(&skb_shinfo(skb)->frag_list);
57827b437c8SHerbert Xu }
57927b437c8SHerbert Xu 
5801da177e4SLinus Torvalds static void skb_clone_fraglist(struct sk_buff *skb)
5811da177e4SLinus Torvalds {
5821da177e4SLinus Torvalds 	struct sk_buff *list;
5831da177e4SLinus Torvalds 
584fbb398a8SDavid S. Miller 	skb_walk_frags(skb, list)
5851da177e4SLinus Torvalds 		skb_get(list);
5861da177e4SLinus Torvalds }
5871da177e4SLinus Torvalds 
588d3836f21SEric Dumazet static void skb_free_head(struct sk_buff *skb)
589d3836f21SEric Dumazet {
590181edb2bSAlexander Duyck 	unsigned char *head = skb->head;
591181edb2bSAlexander Duyck 
592d3836f21SEric Dumazet 	if (skb->head_frag)
593181edb2bSAlexander Duyck 		skb_free_frag(head);
594d3836f21SEric Dumazet 	else
595181edb2bSAlexander Duyck 		kfree(head);
596d3836f21SEric Dumazet }
597d3836f21SEric Dumazet 
5985bba1712SAdrian Bunk static void skb_release_data(struct sk_buff *skb)
5991da177e4SLinus Torvalds {
600ff04a771SEric Dumazet 	struct skb_shared_info *shinfo = skb_shinfo(skb);
6011da177e4SLinus Torvalds 	int i;
602ff04a771SEric Dumazet 
603ff04a771SEric Dumazet 	if (skb->cloned &&
604ff04a771SEric Dumazet 	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
605ff04a771SEric Dumazet 			      &shinfo->dataref))
606ff04a771SEric Dumazet 		return;
607ff04a771SEric Dumazet 
608ff04a771SEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++)
609ff04a771SEric Dumazet 		__skb_frag_unref(&shinfo->frags[i]);
6101da177e4SLinus Torvalds 
611ff04a771SEric Dumazet 	if (shinfo->frag_list)
612ff04a771SEric Dumazet 		kfree_skb_list(shinfo->frag_list);
6131da177e4SLinus Torvalds 
6141f8b977aSWillem de Bruijn 	skb_zcopy_clear(skb, true);
615d3836f21SEric Dumazet 	skb_free_head(skb);
6161da177e4SLinus Torvalds }
6171da177e4SLinus Torvalds 
6181da177e4SLinus Torvalds /*
6191da177e4SLinus Torvalds  *	Free an skbuff by memory without cleaning the state.
6201da177e4SLinus Torvalds  */
6212d4baff8SHerbert Xu static void kfree_skbmem(struct sk_buff *skb)
6221da177e4SLinus Torvalds {
623d0bf4a9eSEric Dumazet 	struct sk_buff_fclones *fclones;
624d179cd12SDavid S. Miller 
625d179cd12SDavid S. Miller 	switch (skb->fclone) {
626d179cd12SDavid S. Miller 	case SKB_FCLONE_UNAVAILABLE:
6271da177e4SLinus Torvalds 		kmem_cache_free(skbuff_head_cache, skb);
6286ffe75ebSEric Dumazet 		return;
629d179cd12SDavid S. Miller 
630d179cd12SDavid S. Miller 	case SKB_FCLONE_ORIG:
631d0bf4a9eSEric Dumazet 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
6326ffe75ebSEric Dumazet 
6336ffe75ebSEric Dumazet 		/* We usually free the clone (TX completion) before original skb
6346ffe75ebSEric Dumazet 		 * This test would have no chance to be true for the clone,
6356ffe75ebSEric Dumazet 		 * while here, branch prediction will be good.
6366ffe75ebSEric Dumazet 		 */
6372638595aSReshetova, Elena 		if (refcount_read(&fclones->fclone_ref) == 1)
6386ffe75ebSEric Dumazet 			goto fastpath;
639d179cd12SDavid S. Miller 		break;
640d179cd12SDavid S. Miller 
6416ffe75ebSEric Dumazet 	default: /* SKB_FCLONE_CLONE */
642d0bf4a9eSEric Dumazet 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
643d179cd12SDavid S. Miller 		break;
6443ff50b79SStephen Hemminger 	}
6452638595aSReshetova, Elena 	if (!refcount_dec_and_test(&fclones->fclone_ref))
6466ffe75ebSEric Dumazet 		return;
6476ffe75ebSEric Dumazet fastpath:
6486ffe75ebSEric Dumazet 	kmem_cache_free(skbuff_fclone_cache, fclones);
6491da177e4SLinus Torvalds }
6501da177e4SLinus Torvalds 
6510a463c78SPaolo Abeni void skb_release_head_state(struct sk_buff *skb)
6521da177e4SLinus Torvalds {
653adf30907SEric Dumazet 	skb_dst_drop(skb);
6541da177e4SLinus Torvalds 	if (skb->destructor) {
6559c2b3328SStephen Hemminger 		WARN_ON(in_irq());
6561da177e4SLinus Torvalds 		skb->destructor(skb);
6571da177e4SLinus Torvalds 	}
658a3bf7ae9SIgor Maravić #if IS_ENABLED(CONFIG_NF_CONNTRACK)
659cb9c6836SFlorian Westphal 	nf_conntrack_put(skb_nfct(skb));
6602fc72c7bSKOVACS Krisztian #endif
661df5042f4SFlorian Westphal 	skb_ext_put(skb);
66204a4bb55SLennert Buytenhek }
66304a4bb55SLennert Buytenhek 
66404a4bb55SLennert Buytenhek /* Free everything but the sk_buff shell. */
66504a4bb55SLennert Buytenhek static void skb_release_all(struct sk_buff *skb)
66604a4bb55SLennert Buytenhek {
66704a4bb55SLennert Buytenhek 	skb_release_head_state(skb);
668a28b1b90SFlorian Westphal 	if (likely(skb->head))
6692d4baff8SHerbert Xu 		skb_release_data(skb);
6702d4baff8SHerbert Xu }
6711da177e4SLinus Torvalds 
6722d4baff8SHerbert Xu /**
6732d4baff8SHerbert Xu  *	__kfree_skb - private function
6742d4baff8SHerbert Xu  *	@skb: buffer
6752d4baff8SHerbert Xu  *
6762d4baff8SHerbert Xu  *	Free an sk_buff. Release anything attached to the buffer.
6772d4baff8SHerbert Xu  *	Clean the state. This is an internal helper function. Users should
6782d4baff8SHerbert Xu  *	always call kfree_skb
6792d4baff8SHerbert Xu  */
6802d4baff8SHerbert Xu 
6812d4baff8SHerbert Xu void __kfree_skb(struct sk_buff *skb)
6822d4baff8SHerbert Xu {
6832d4baff8SHerbert Xu 	skb_release_all(skb);
6841da177e4SLinus Torvalds 	kfree_skbmem(skb);
6851da177e4SLinus Torvalds }
686b4ac530fSDavid S. Miller EXPORT_SYMBOL(__kfree_skb);
6871da177e4SLinus Torvalds 
6881da177e4SLinus Torvalds /**
689231d06aeSJörn Engel  *	kfree_skb - free an sk_buff
690231d06aeSJörn Engel  *	@skb: buffer to free
691231d06aeSJörn Engel  *
692231d06aeSJörn Engel  *	Drop a reference to the buffer and free it if the usage count has
693231d06aeSJörn Engel  *	hit zero.
694231d06aeSJörn Engel  */
695231d06aeSJörn Engel void kfree_skb(struct sk_buff *skb)
696231d06aeSJörn Engel {
6973889a803SPaolo Abeni 	if (!skb_unref(skb))
698231d06aeSJörn Engel 		return;
6993889a803SPaolo Abeni 
700ead2ceb0SNeil Horman 	trace_kfree_skb(skb, __builtin_return_address(0));
701231d06aeSJörn Engel 	__kfree_skb(skb);
702231d06aeSJörn Engel }
703b4ac530fSDavid S. Miller EXPORT_SYMBOL(kfree_skb);
704231d06aeSJörn Engel 
705bd8a7036SEric Dumazet void kfree_skb_list(struct sk_buff *segs)
706bd8a7036SEric Dumazet {
707bd8a7036SEric Dumazet 	while (segs) {
708bd8a7036SEric Dumazet 		struct sk_buff *next = segs->next;
709bd8a7036SEric Dumazet 
710bd8a7036SEric Dumazet 		kfree_skb(segs);
711bd8a7036SEric Dumazet 		segs = next;
712bd8a7036SEric Dumazet 	}
713bd8a7036SEric Dumazet }
714bd8a7036SEric Dumazet EXPORT_SYMBOL(kfree_skb_list);
715bd8a7036SEric Dumazet 
7166413139dSWillem de Bruijn /* Dump skb information and contents.
7176413139dSWillem de Bruijn  *
7186413139dSWillem de Bruijn  * Must only be called from net_ratelimit()-ed paths.
7196413139dSWillem de Bruijn  *
720302af7c6SVladimir Oltean  * Dumps whole packets if full_pkt, only headers otherwise.
7216413139dSWillem de Bruijn  */
7226413139dSWillem de Bruijn void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
7236413139dSWillem de Bruijn {
7246413139dSWillem de Bruijn 	struct skb_shared_info *sh = skb_shinfo(skb);
7256413139dSWillem de Bruijn 	struct net_device *dev = skb->dev;
7266413139dSWillem de Bruijn 	struct sock *sk = skb->sk;
7276413139dSWillem de Bruijn 	struct sk_buff *list_skb;
7286413139dSWillem de Bruijn 	bool has_mac, has_trans;
7296413139dSWillem de Bruijn 	int headroom, tailroom;
7306413139dSWillem de Bruijn 	int i, len, seg_len;
7316413139dSWillem de Bruijn 
7326413139dSWillem de Bruijn 	if (full_pkt)
7336413139dSWillem de Bruijn 		len = skb->len;
7346413139dSWillem de Bruijn 	else
7356413139dSWillem de Bruijn 		len = min_t(int, skb->len, MAX_HEADER + 128);
7366413139dSWillem de Bruijn 
7376413139dSWillem de Bruijn 	headroom = skb_headroom(skb);
7386413139dSWillem de Bruijn 	tailroom = skb_tailroom(skb);
7396413139dSWillem de Bruijn 
7406413139dSWillem de Bruijn 	has_mac = skb_mac_header_was_set(skb);
7416413139dSWillem de Bruijn 	has_trans = skb_transport_header_was_set(skb);
7426413139dSWillem de Bruijn 
7436413139dSWillem de Bruijn 	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
7446413139dSWillem de Bruijn 	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
7456413139dSWillem de Bruijn 	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
7466413139dSWillem de Bruijn 	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
7476413139dSWillem de Bruijn 	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
7486413139dSWillem de Bruijn 	       level, skb->len, headroom, skb_headlen(skb), tailroom,
7496413139dSWillem de Bruijn 	       has_mac ? skb->mac_header : -1,
7506413139dSWillem de Bruijn 	       has_mac ? skb_mac_header_len(skb) : -1,
7516413139dSWillem de Bruijn 	       skb->network_header,
7526413139dSWillem de Bruijn 	       has_trans ? skb_network_header_len(skb) : -1,
7536413139dSWillem de Bruijn 	       has_trans ? skb->transport_header : -1,
7546413139dSWillem de Bruijn 	       sh->tx_flags, sh->nr_frags,
7556413139dSWillem de Bruijn 	       sh->gso_size, sh->gso_type, sh->gso_segs,
7566413139dSWillem de Bruijn 	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
7576413139dSWillem de Bruijn 	       skb->csum_valid, skb->csum_level,
7586413139dSWillem de Bruijn 	       skb->hash, skb->sw_hash, skb->l4_hash,
7596413139dSWillem de Bruijn 	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
7606413139dSWillem de Bruijn 
7616413139dSWillem de Bruijn 	if (dev)
7626413139dSWillem de Bruijn 		printk("%sdev name=%s feat=0x%pNF\n",
7636413139dSWillem de Bruijn 		       level, dev->name, &dev->features);
7646413139dSWillem de Bruijn 	if (sk)
765db8051f3SQian Cai 		printk("%ssk family=%hu type=%u proto=%u\n",
7666413139dSWillem de Bruijn 		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);
7676413139dSWillem de Bruijn 
7686413139dSWillem de Bruijn 	if (full_pkt && headroom)
7696413139dSWillem de Bruijn 		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
7706413139dSWillem de Bruijn 			       16, 1, skb->head, headroom, false);
7716413139dSWillem de Bruijn 
7726413139dSWillem de Bruijn 	seg_len = min_t(int, skb_headlen(skb), len);
7736413139dSWillem de Bruijn 	if (seg_len)
7746413139dSWillem de Bruijn 		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
7756413139dSWillem de Bruijn 			       16, 1, skb->data, seg_len, false);
7766413139dSWillem de Bruijn 	len -= seg_len;
7776413139dSWillem de Bruijn 
7786413139dSWillem de Bruijn 	if (full_pkt && tailroom)
7796413139dSWillem de Bruijn 		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
7806413139dSWillem de Bruijn 			       16, 1, skb_tail_pointer(skb), tailroom, false);
7816413139dSWillem de Bruijn 
7826413139dSWillem de Bruijn 	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
7836413139dSWillem de Bruijn 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7846413139dSWillem de Bruijn 		u32 p_off, p_len, copied;
7856413139dSWillem de Bruijn 		struct page *p;
7866413139dSWillem de Bruijn 		u8 *vaddr;
7876413139dSWillem de Bruijn 
788b54c9d5bSJonathan Lemon 		skb_frag_foreach_page(frag, skb_frag_off(frag),
7896413139dSWillem de Bruijn 				      skb_frag_size(frag), p, p_off, p_len,
7906413139dSWillem de Bruijn 				      copied) {
7916413139dSWillem de Bruijn 			seg_len = min_t(int, p_len, len);
7926413139dSWillem de Bruijn 			vaddr = kmap_atomic(p);
7936413139dSWillem de Bruijn 			print_hex_dump(level, "skb frag:     ",
7946413139dSWillem de Bruijn 				       DUMP_PREFIX_OFFSET,
7956413139dSWillem de Bruijn 				       16, 1, vaddr + p_off, seg_len, false);
7966413139dSWillem de Bruijn 			kunmap_atomic(vaddr);
7976413139dSWillem de Bruijn 			len -= seg_len;
7986413139dSWillem de Bruijn 			if (!len)
7996413139dSWillem de Bruijn 				break;
8006413139dSWillem de Bruijn 		}
8016413139dSWillem de Bruijn 	}
8026413139dSWillem de Bruijn 
8036413139dSWillem de Bruijn 	if (full_pkt && skb_has_frag_list(skb)) {
8046413139dSWillem de Bruijn 		printk("skb fraglist:\n");
8056413139dSWillem de Bruijn 		skb_walk_frags(skb, list_skb)
8066413139dSWillem de Bruijn 			skb_dump(level, list_skb, true);
8076413139dSWillem de Bruijn 	}
8086413139dSWillem de Bruijn }
8096413139dSWillem de Bruijn EXPORT_SYMBOL(skb_dump);
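/*
 * A minimal sketch of calling skb_dump() from a rate-limited error path,
 * as the comment above requires.  KERN_ERR and the headers-only dump are
 * illustrative choices.
 */
static inline void example_dump_bad_skb(const struct sk_buff *skb)
{
	if (net_ratelimit())
		skb_dump(KERN_ERR, skb, false);
}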
8106413139dSWillem de Bruijn 
811d1a203eaSStephen Hemminger /**
81225121173SMichael S. Tsirkin  *	skb_tx_error - report an sk_buff xmit error
81325121173SMichael S. Tsirkin  *	@skb: buffer that triggered an error
81425121173SMichael S. Tsirkin  *
81525121173SMichael S. Tsirkin  *	Report xmit error if a device callback is tracking this skb.
81625121173SMichael S. Tsirkin  *	skb must be freed afterwards.
81725121173SMichael S. Tsirkin  */
81825121173SMichael S. Tsirkin void skb_tx_error(struct sk_buff *skb)
81925121173SMichael S. Tsirkin {
8201f8b977aSWillem de Bruijn 	skb_zcopy_clear(skb, true);
82125121173SMichael S. Tsirkin }
82225121173SMichael S. Tsirkin EXPORT_SYMBOL(skb_tx_error);
82325121173SMichael S. Tsirkin 
824be769db2SHerbert Xu #ifdef CONFIG_TRACEPOINTS
82525121173SMichael S. Tsirkin /**
826ead2ceb0SNeil Horman  *	consume_skb - free an skbuff
827ead2ceb0SNeil Horman  *	@skb: buffer to free
828ead2ceb0SNeil Horman  *
829ead2ceb0SNeil Horman  *	Drop a reference to the buffer and free it if the usage count has hit zero.
830ead2ceb0SNeil Horman  *	Functions identically to kfree_skb(), but kfree_skb() assumes that the frame
831ead2ceb0SNeil Horman  *	is being dropped after a failure and notes that via its tracepoint.
832ead2ceb0SNeil Horman  */
833ead2ceb0SNeil Horman void consume_skb(struct sk_buff *skb)
834ead2ceb0SNeil Horman {
8353889a803SPaolo Abeni 	if (!skb_unref(skb))
836ead2ceb0SNeil Horman 		return;
8373889a803SPaolo Abeni 
83807dc22e7SKoki Sanagi 	trace_consume_skb(skb);
839ead2ceb0SNeil Horman 	__kfree_skb(skb);
840ead2ceb0SNeil Horman }
841ead2ceb0SNeil Horman EXPORT_SYMBOL(consume_skb);
842be769db2SHerbert Xu #endif
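/*
 * A sketch of choosing between the two freeing helpers: kfree_skb() marks
 * a drop (and fires the kfree_skb tracepoint), while consume_skb() marks a
 * successful consume.  "err" is an assumed status from a hypothetical caller.
 */
static inline void example_free_on_status(struct sk_buff *skb, int err)
{
	if (err)
		kfree_skb(skb);		/* traced/accounted as a drop */
	else
		consume_skb(skb);	/* traced as a normal consume */
}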
843ead2ceb0SNeil Horman 
8440a463c78SPaolo Abeni /**
845c1639be9SMauro Carvalho Chehab  *	__consume_stateless_skb - free an skbuff, assuming it is stateless
8460a463c78SPaolo Abeni  *	@skb: buffer to free
8470a463c78SPaolo Abeni  *
848ca2c1418SPaolo Abeni  *	Like consume_skb(), but this variant assumes that this is the last
849ca2c1418SPaolo Abeni  *	skb reference and that all the head states have already been dropped
8500a463c78SPaolo Abeni  */
851ca2c1418SPaolo Abeni void __consume_stateless_skb(struct sk_buff *skb)
8520a463c78SPaolo Abeni {
8530a463c78SPaolo Abeni 	trace_consume_skb(skb);
8540a463c78SPaolo Abeni 	skb_release_data(skb);
8550a463c78SPaolo Abeni 	kfree_skbmem(skb);
8560a463c78SPaolo Abeni }
8570a463c78SPaolo Abeni 
858795bb1c0SJesper Dangaard Brouer void __kfree_skb_flush(void)
859795bb1c0SJesper Dangaard Brouer {
860795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
861795bb1c0SJesper Dangaard Brouer 
862795bb1c0SJesper Dangaard Brouer 	/* flush skb_cache if it contains objects */
863795bb1c0SJesper Dangaard Brouer 	if (nc->skb_count) {
864795bb1c0SJesper Dangaard Brouer 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
865795bb1c0SJesper Dangaard Brouer 				     nc->skb_cache);
866795bb1c0SJesper Dangaard Brouer 		nc->skb_count = 0;
867795bb1c0SJesper Dangaard Brouer 	}
868795bb1c0SJesper Dangaard Brouer }
869795bb1c0SJesper Dangaard Brouer 
87015fad714SJesper Dangaard Brouer static inline void _kfree_skb_defer(struct sk_buff *skb)
871795bb1c0SJesper Dangaard Brouer {
872795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
873795bb1c0SJesper Dangaard Brouer 
874795bb1c0SJesper Dangaard Brouer 	/* drop skb->head and call any destructors for packet */
875795bb1c0SJesper Dangaard Brouer 	skb_release_all(skb);
876795bb1c0SJesper Dangaard Brouer 
877795bb1c0SJesper Dangaard Brouer 	/* record skb to CPU local list */
878795bb1c0SJesper Dangaard Brouer 	nc->skb_cache[nc->skb_count++] = skb;
879795bb1c0SJesper Dangaard Brouer 
880795bb1c0SJesper Dangaard Brouer #ifdef CONFIG_SLUB
881795bb1c0SJesper Dangaard Brouer 	/* SLUB writes into objects when freeing */
882795bb1c0SJesper Dangaard Brouer 	prefetchw(skb);
883795bb1c0SJesper Dangaard Brouer #endif
884795bb1c0SJesper Dangaard Brouer 
885795bb1c0SJesper Dangaard Brouer 	/* flush skb_cache if it is filled */
886795bb1c0SJesper Dangaard Brouer 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
887795bb1c0SJesper Dangaard Brouer 		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
888795bb1c0SJesper Dangaard Brouer 				     nc->skb_cache);
889795bb1c0SJesper Dangaard Brouer 		nc->skb_count = 0;
890795bb1c0SJesper Dangaard Brouer 	}
891795bb1c0SJesper Dangaard Brouer }
89215fad714SJesper Dangaard Brouer void __kfree_skb_defer(struct sk_buff *skb)
89315fad714SJesper Dangaard Brouer {
89415fad714SJesper Dangaard Brouer 	_kfree_skb_defer(skb);
89515fad714SJesper Dangaard Brouer }
896795bb1c0SJesper Dangaard Brouer 
897795bb1c0SJesper Dangaard Brouer void napi_consume_skb(struct sk_buff *skb, int budget)
898795bb1c0SJesper Dangaard Brouer {
899885eb0a5SJesper Dangaard Brouer 	/* Zero budget indicate non-NAPI context called us, like netpoll */
900795bb1c0SJesper Dangaard Brouer 	if (unlikely(!budget)) {
901885eb0a5SJesper Dangaard Brouer 		dev_consume_skb_any(skb);
902795bb1c0SJesper Dangaard Brouer 		return;
903795bb1c0SJesper Dangaard Brouer 	}
904795bb1c0SJesper Dangaard Brouer 
9056454eca8SYunsheng Lin 	lockdep_assert_in_softirq();
9066454eca8SYunsheng Lin 
9077608894eSPaolo Abeni 	if (!skb_unref(skb))
908795bb1c0SJesper Dangaard Brouer 		return;
9097608894eSPaolo Abeni 
910795bb1c0SJesper Dangaard Brouer 	/* if reaching here SKB is ready to free */
911795bb1c0SJesper Dangaard Brouer 	trace_consume_skb(skb);
912795bb1c0SJesper Dangaard Brouer 
913795bb1c0SJesper Dangaard Brouer 	/* if SKB is a clone, don't handle this case */
914abbdb5a7SEric Dumazet 	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
915795bb1c0SJesper Dangaard Brouer 		__kfree_skb(skb);
916795bb1c0SJesper Dangaard Brouer 		return;
917795bb1c0SJesper Dangaard Brouer 	}
918795bb1c0SJesper Dangaard Brouer 
91915fad714SJesper Dangaard Brouer 	_kfree_skb_defer(skb);
920795bb1c0SJesper Dangaard Brouer }
921795bb1c0SJesper Dangaard Brouer EXPORT_SYMBOL(napi_consume_skb);
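/*
 * A sketch of a TX-completion path using the helper above: the budget from
 * the driver's NAPI poll callback is passed straight through, so that a
 * zero budget (netpoll) falls back to dev_consume_skb_any().  The
 * parameters are assumptions about a hypothetical driver.
 */
static inline void example_tx_completion(struct sk_buff *skb, int budget)
{
	napi_consume_skb(skb, budget);
}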
922795bb1c0SJesper Dangaard Brouer 
923b1937227SEric Dumazet /* Make sure a field is enclosed inside headers_start/headers_end section */
924b1937227SEric Dumazet #define CHECK_SKB_FIELD(field) \
925b1937227SEric Dumazet 	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
926b1937227SEric Dumazet 		     offsetof(struct sk_buff, headers_start));	\
927b1937227SEric Dumazet 	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
928b1937227SEric Dumazet 		     offsetof(struct sk_buff, headers_end));	\
929b1937227SEric Dumazet 
930dec18810SHerbert Xu static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
931dec18810SHerbert Xu {
932dec18810SHerbert Xu 	new->tstamp		= old->tstamp;
933b1937227SEric Dumazet 	/* We do not copy old->sk */
934dec18810SHerbert Xu 	new->dev		= old->dev;
935b1937227SEric Dumazet 	memcpy(new->cb, old->cb, sizeof(old->cb));
9367fee226aSEric Dumazet 	skb_dst_copy(new, old);
937df5042f4SFlorian Westphal 	__skb_ext_copy(new, old);
938b1937227SEric Dumazet 	__nf_copy(new, old, false);
9396aa895b0SPatrick McHardy 
940b1937227SEric Dumazet 	/* Note : this field could be in headers_start/headers_end section
941b1937227SEric Dumazet 	 * It is not yet because we do not want to have a 16 bit hole
942b1937227SEric Dumazet 	 */
943b1937227SEric Dumazet 	new->queue_mapping = old->queue_mapping;
94406021292SEliezer Tamir 
945b1937227SEric Dumazet 	memcpy(&new->headers_start, &old->headers_start,
946b1937227SEric Dumazet 	       offsetof(struct sk_buff, headers_end) -
947b1937227SEric Dumazet 	       offsetof(struct sk_buff, headers_start));
948b1937227SEric Dumazet 	CHECK_SKB_FIELD(protocol);
949b1937227SEric Dumazet 	CHECK_SKB_FIELD(csum);
950b1937227SEric Dumazet 	CHECK_SKB_FIELD(hash);
951b1937227SEric Dumazet 	CHECK_SKB_FIELD(priority);
952b1937227SEric Dumazet 	CHECK_SKB_FIELD(skb_iif);
953b1937227SEric Dumazet 	CHECK_SKB_FIELD(vlan_proto);
954b1937227SEric Dumazet 	CHECK_SKB_FIELD(vlan_tci);
955b1937227SEric Dumazet 	CHECK_SKB_FIELD(transport_header);
956b1937227SEric Dumazet 	CHECK_SKB_FIELD(network_header);
957b1937227SEric Dumazet 	CHECK_SKB_FIELD(mac_header);
958b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_protocol);
959b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_transport_header);
960b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_network_header);
961b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_mac_header);
962b1937227SEric Dumazet 	CHECK_SKB_FIELD(mark);
963b1937227SEric Dumazet #ifdef CONFIG_NETWORK_SECMARK
964b1937227SEric Dumazet 	CHECK_SKB_FIELD(secmark);
965b1937227SEric Dumazet #endif
966e0d1095aSCong Wang #ifdef CONFIG_NET_RX_BUSY_POLL
967b1937227SEric Dumazet 	CHECK_SKB_FIELD(napi_id);
96806021292SEliezer Tamir #endif
9692bd82484SEric Dumazet #ifdef CONFIG_XPS
9702bd82484SEric Dumazet 	CHECK_SKB_FIELD(sender_cpu);
9712bd82484SEric Dumazet #endif
972b1937227SEric Dumazet #ifdef CONFIG_NET_SCHED
973b1937227SEric Dumazet 	CHECK_SKB_FIELD(tc_index);
974b1937227SEric Dumazet #endif
975b1937227SEric Dumazet 
976dec18810SHerbert Xu }
977dec18810SHerbert Xu 
97882c49a35SHerbert Xu /*
97982c49a35SHerbert Xu  * You should not add any new code to this function.  Add it to
98082c49a35SHerbert Xu  * __copy_skb_header above instead.
98182c49a35SHerbert Xu  */
982e0053ec0SHerbert Xu static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
9831da177e4SLinus Torvalds {
9841da177e4SLinus Torvalds #define C(x) n->x = skb->x
9851da177e4SLinus Torvalds 
9861da177e4SLinus Torvalds 	n->next = n->prev = NULL;
9871da177e4SLinus Torvalds 	n->sk = NULL;
988dec18810SHerbert Xu 	__copy_skb_header(n, skb);
989dec18810SHerbert Xu 
9901da177e4SLinus Torvalds 	C(len);
9911da177e4SLinus Torvalds 	C(data_len);
9923e6b3b2eSAlexey Dobriyan 	C(mac_len);
993334a8132SPatrick McHardy 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
99402f1c89dSPaul Moore 	n->cloned = 1;
9951da177e4SLinus Torvalds 	n->nohdr = 0;
996b13dda9fSEric Dumazet 	n->peeked = 0;
997e78bfb07SStefano Brivio 	C(pfmemalloc);
9981da177e4SLinus Torvalds 	n->destructor = NULL;
9991da177e4SLinus Torvalds 	C(tail);
10001da177e4SLinus Torvalds 	C(end);
100102f1c89dSPaul Moore 	C(head);
1002d3836f21SEric Dumazet 	C(head_frag);
100302f1c89dSPaul Moore 	C(data);
100402f1c89dSPaul Moore 	C(truesize);
100563354797SReshetova, Elena 	refcount_set(&n->users, 1);
10061da177e4SLinus Torvalds 
10071da177e4SLinus Torvalds 	atomic_inc(&(skb_shinfo(skb)->dataref));
10081da177e4SLinus Torvalds 	skb->cloned = 1;
10091da177e4SLinus Torvalds 
10101da177e4SLinus Torvalds 	return n;
1011e0053ec0SHerbert Xu #undef C
1012e0053ec0SHerbert Xu }
1013e0053ec0SHerbert Xu 
1014e0053ec0SHerbert Xu /**
1015da29e4b4SJakub Kicinski  * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1016da29e4b4SJakub Kicinski  * @first: first sk_buff of the msg
1017da29e4b4SJakub Kicinski  */
1018da29e4b4SJakub Kicinski struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
1019da29e4b4SJakub Kicinski {
1020da29e4b4SJakub Kicinski 	struct sk_buff *n;
1021da29e4b4SJakub Kicinski 
1022da29e4b4SJakub Kicinski 	n = alloc_skb(0, GFP_ATOMIC);
1023da29e4b4SJakub Kicinski 	if (!n)
1024da29e4b4SJakub Kicinski 		return NULL;
1025da29e4b4SJakub Kicinski 
1026da29e4b4SJakub Kicinski 	n->len = first->len;
1027da29e4b4SJakub Kicinski 	n->data_len = first->len;
1028da29e4b4SJakub Kicinski 	n->truesize = first->truesize;
1029da29e4b4SJakub Kicinski 
1030da29e4b4SJakub Kicinski 	skb_shinfo(n)->frag_list = first;
1031da29e4b4SJakub Kicinski 
1032da29e4b4SJakub Kicinski 	__copy_skb_header(n, first);
1033da29e4b4SJakub Kicinski 	n->destructor = NULL;
1034da29e4b4SJakub Kicinski 
1035da29e4b4SJakub Kicinski 	return n;
1036da29e4b4SJakub Kicinski }
1037da29e4b4SJakub Kicinski EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
1038da29e4b4SJakub Kicinski 
1039da29e4b4SJakub Kicinski /**
1040e0053ec0SHerbert Xu  *	skb_morph	-	morph one skb into another
1041e0053ec0SHerbert Xu  *	@dst: the skb to receive the contents
1042e0053ec0SHerbert Xu  *	@src: the skb to supply the contents
1043e0053ec0SHerbert Xu  *
1044e0053ec0SHerbert Xu  *	This is identical to skb_clone except that the target skb is
1045e0053ec0SHerbert Xu  *	supplied by the user.
1046e0053ec0SHerbert Xu  *
1047e0053ec0SHerbert Xu  *	The target skb is returned upon exit.
1048e0053ec0SHerbert Xu  */
1049e0053ec0SHerbert Xu struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
1050e0053ec0SHerbert Xu {
10512d4baff8SHerbert Xu 	skb_release_all(dst);
1052e0053ec0SHerbert Xu 	return __skb_clone(dst, src);
1053e0053ec0SHerbert Xu }
1054e0053ec0SHerbert Xu EXPORT_SYMBOL_GPL(skb_morph);
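
/*
 * Usage sketch (illustrative only; the helper below is hypothetical and not
 * an in-tree API): reuse an existing struct sk_buff as the clone target
 * instead of allocating a new one, as described above.
 */
static struct sk_buff *example_skb_morph_reuse(struct sk_buff *keep,
                                               struct sk_buff *src)
{
        /* keep's old data is released; keep now shares src's packet data */
        return skb_morph(keep, src);
}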
1055e0053ec0SHerbert Xu 
10566f89dbceSSowmini Varadhan int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1057a91dbff5SWillem de Bruijn {
1058a91dbff5SWillem de Bruijn 	unsigned long max_pg, num_pg, new_pg, old_pg;
1059a91dbff5SWillem de Bruijn 	struct user_struct *user;
1060a91dbff5SWillem de Bruijn 
1061a91dbff5SWillem de Bruijn 	if (capable(CAP_IPC_LOCK) || !size)
1062a91dbff5SWillem de Bruijn 		return 0;
1063a91dbff5SWillem de Bruijn 
1064a91dbff5SWillem de Bruijn 	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
1065a91dbff5SWillem de Bruijn 	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1066a91dbff5SWillem de Bruijn 	user = mmp->user ? : current_user();
1067a91dbff5SWillem de Bruijn 
1068a91dbff5SWillem de Bruijn 	do {
1069a91dbff5SWillem de Bruijn 		old_pg = atomic_long_read(&user->locked_vm);
1070a91dbff5SWillem de Bruijn 		new_pg = old_pg + num_pg;
1071a91dbff5SWillem de Bruijn 		if (new_pg > max_pg)
1072a91dbff5SWillem de Bruijn 			return -ENOBUFS;
1073a91dbff5SWillem de Bruijn 	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
1074a91dbff5SWillem de Bruijn 		 old_pg);
1075a91dbff5SWillem de Bruijn 
1076a91dbff5SWillem de Bruijn 	if (!mmp->user) {
1077a91dbff5SWillem de Bruijn 		mmp->user = get_uid(user);
1078a91dbff5SWillem de Bruijn 		mmp->num_pg = num_pg;
1079a91dbff5SWillem de Bruijn 	} else {
1080a91dbff5SWillem de Bruijn 		mmp->num_pg += num_pg;
1081a91dbff5SWillem de Bruijn 	}
1082a91dbff5SWillem de Bruijn 
1083a91dbff5SWillem de Bruijn 	return 0;
1084a91dbff5SWillem de Bruijn }
10856f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
1086a91dbff5SWillem de Bruijn 
10876f89dbceSSowmini Varadhan void mm_unaccount_pinned_pages(struct mmpin *mmp)
1088a91dbff5SWillem de Bruijn {
1089a91dbff5SWillem de Bruijn 	if (mmp->user) {
1090a91dbff5SWillem de Bruijn 		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1091a91dbff5SWillem de Bruijn 		free_uid(mmp->user);
1092a91dbff5SWillem de Bruijn 	}
1093a91dbff5SWillem de Bruijn }
10946f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1095a91dbff5SWillem de Bruijn 
109652267790SWillem de Bruijn struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
109752267790SWillem de Bruijn {
109852267790SWillem de Bruijn 	struct ubuf_info *uarg;
109952267790SWillem de Bruijn 	struct sk_buff *skb;
110052267790SWillem de Bruijn 
110152267790SWillem de Bruijn 	WARN_ON_ONCE(!in_task());
110252267790SWillem de Bruijn 
110352267790SWillem de Bruijn 	skb = sock_omalloc(sk, 0, GFP_KERNEL);
110452267790SWillem de Bruijn 	if (!skb)
110552267790SWillem de Bruijn 		return NULL;
110652267790SWillem de Bruijn 
110752267790SWillem de Bruijn 	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
110852267790SWillem de Bruijn 	uarg = (void *)skb->cb;
1109a91dbff5SWillem de Bruijn 	uarg->mmp.user = NULL;
1110a91dbff5SWillem de Bruijn 
1111a91dbff5SWillem de Bruijn 	if (mm_account_pinned_pages(&uarg->mmp, size)) {
1112a91dbff5SWillem de Bruijn 		kfree_skb(skb);
1113a91dbff5SWillem de Bruijn 		return NULL;
1114a91dbff5SWillem de Bruijn 	}
111552267790SWillem de Bruijn 
111652267790SWillem de Bruijn 	uarg->callback = sock_zerocopy_callback;
11174ab6c99dSWillem de Bruijn 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
11184ab6c99dSWillem de Bruijn 	uarg->len = 1;
11194ab6c99dSWillem de Bruijn 	uarg->bytelen = size;
112052267790SWillem de Bruijn 	uarg->zerocopy = 1;
1121c1d1b437SEric Dumazet 	refcount_set(&uarg->refcnt, 1);
112252267790SWillem de Bruijn 	sock_hold(sk);
112352267790SWillem de Bruijn 
112452267790SWillem de Bruijn 	return uarg;
112552267790SWillem de Bruijn }
112652267790SWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
112752267790SWillem de Bruijn 
112852267790SWillem de Bruijn static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
112952267790SWillem de Bruijn {
113052267790SWillem de Bruijn 	return container_of((void *)uarg, struct sk_buff, cb);
113152267790SWillem de Bruijn }
113252267790SWillem de Bruijn 
11334ab6c99dSWillem de Bruijn struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
11344ab6c99dSWillem de Bruijn 					struct ubuf_info *uarg)
11354ab6c99dSWillem de Bruijn {
11364ab6c99dSWillem de Bruijn 	if (uarg) {
11374ab6c99dSWillem de Bruijn 		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
11384ab6c99dSWillem de Bruijn 		u32 bytelen, next;
11394ab6c99dSWillem de Bruijn 
11404ab6c99dSWillem de Bruijn 		/* realloc only when socket is locked (TCP, UDP cork),
11414ab6c99dSWillem de Bruijn 		 * so uarg->len and sk_zckey access is serialized
11424ab6c99dSWillem de Bruijn 		 */
11434ab6c99dSWillem de Bruijn 		if (!sock_owned_by_user(sk)) {
11444ab6c99dSWillem de Bruijn 			WARN_ON_ONCE(1);
11454ab6c99dSWillem de Bruijn 			return NULL;
11464ab6c99dSWillem de Bruijn 		}
11474ab6c99dSWillem de Bruijn 
11484ab6c99dSWillem de Bruijn 		bytelen = uarg->bytelen + size;
11494ab6c99dSWillem de Bruijn 		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
11504ab6c99dSWillem de Bruijn 			/* TCP can create new skb to attach new uarg */
11514ab6c99dSWillem de Bruijn 			if (sk->sk_type == SOCK_STREAM)
11524ab6c99dSWillem de Bruijn 				goto new_alloc;
11534ab6c99dSWillem de Bruijn 			return NULL;
11544ab6c99dSWillem de Bruijn 		}
11554ab6c99dSWillem de Bruijn 
11564ab6c99dSWillem de Bruijn 		next = (u32)atomic_read(&sk->sk_zckey);
11574ab6c99dSWillem de Bruijn 		if ((u32)(uarg->id + uarg->len) == next) {
1158a91dbff5SWillem de Bruijn 			if (mm_account_pinned_pages(&uarg->mmp, size))
1159a91dbff5SWillem de Bruijn 				return NULL;
11604ab6c99dSWillem de Bruijn 			uarg->len++;
11614ab6c99dSWillem de Bruijn 			uarg->bytelen = bytelen;
11624ab6c99dSWillem de Bruijn 			atomic_set(&sk->sk_zckey, ++next);
1163100f6d8eSWillem de Bruijn 
1164100f6d8eSWillem de Bruijn 			/* no extra ref when appending to datagram (MSG_MORE) */
1165100f6d8eSWillem de Bruijn 			if (sk->sk_type == SOCK_STREAM)
1166db5bce32SEric Dumazet 				sock_zerocopy_get(uarg);
1167100f6d8eSWillem de Bruijn 
11684ab6c99dSWillem de Bruijn 			return uarg;
11694ab6c99dSWillem de Bruijn 		}
11704ab6c99dSWillem de Bruijn 	}
11714ab6c99dSWillem de Bruijn 
11724ab6c99dSWillem de Bruijn new_alloc:
11734ab6c99dSWillem de Bruijn 	return sock_zerocopy_alloc(sk, size);
11744ab6c99dSWillem de Bruijn }
11754ab6c99dSWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
11764ab6c99dSWillem de Bruijn 
11774ab6c99dSWillem de Bruijn static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
11784ab6c99dSWillem de Bruijn {
11794ab6c99dSWillem de Bruijn 	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
11804ab6c99dSWillem de Bruijn 	u32 old_lo, old_hi;
11814ab6c99dSWillem de Bruijn 	u64 sum_len;
11824ab6c99dSWillem de Bruijn 
11834ab6c99dSWillem de Bruijn 	old_lo = serr->ee.ee_info;
11844ab6c99dSWillem de Bruijn 	old_hi = serr->ee.ee_data;
11854ab6c99dSWillem de Bruijn 	sum_len = old_hi - old_lo + 1ULL + len;
11864ab6c99dSWillem de Bruijn 
11874ab6c99dSWillem de Bruijn 	if (sum_len >= (1ULL << 32))
11884ab6c99dSWillem de Bruijn 		return false;
11894ab6c99dSWillem de Bruijn 
11904ab6c99dSWillem de Bruijn 	if (lo != old_hi + 1)
11914ab6c99dSWillem de Bruijn 		return false;
11924ab6c99dSWillem de Bruijn 
11934ab6c99dSWillem de Bruijn 	serr->ee.ee_data += len;
11944ab6c99dSWillem de Bruijn 	return true;
11954ab6c99dSWillem de Bruijn }
11964ab6c99dSWillem de Bruijn 
119752267790SWillem de Bruijn void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
119852267790SWillem de Bruijn {
11994ab6c99dSWillem de Bruijn 	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
120052267790SWillem de Bruijn 	struct sock_exterr_skb *serr;
120152267790SWillem de Bruijn 	struct sock *sk = skb->sk;
12024ab6c99dSWillem de Bruijn 	struct sk_buff_head *q;
12034ab6c99dSWillem de Bruijn 	unsigned long flags;
12044ab6c99dSWillem de Bruijn 	u32 lo, hi;
12054ab6c99dSWillem de Bruijn 	u16 len;
120652267790SWillem de Bruijn 
1207ccaffff1SWillem de Bruijn 	mm_unaccount_pinned_pages(&uarg->mmp);
1208ccaffff1SWillem de Bruijn 
12094ab6c99dSWillem de Bruijn 	/* if !len, there was only 1 call, and it was aborted
12104ab6c99dSWillem de Bruijn 	 * so do not queue a completion notification
12114ab6c99dSWillem de Bruijn 	 */
12124ab6c99dSWillem de Bruijn 	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
121352267790SWillem de Bruijn 		goto release;
121452267790SWillem de Bruijn 
12154ab6c99dSWillem de Bruijn 	len = uarg->len;
12164ab6c99dSWillem de Bruijn 	lo = uarg->id;
12174ab6c99dSWillem de Bruijn 	hi = uarg->id + len - 1;
12184ab6c99dSWillem de Bruijn 
121952267790SWillem de Bruijn 	serr = SKB_EXT_ERR(skb);
122052267790SWillem de Bruijn 	memset(serr, 0, sizeof(*serr));
122152267790SWillem de Bruijn 	serr->ee.ee_errno = 0;
122252267790SWillem de Bruijn 	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
12234ab6c99dSWillem de Bruijn 	serr->ee.ee_data = hi;
12244ab6c99dSWillem de Bruijn 	serr->ee.ee_info = lo;
122552267790SWillem de Bruijn 	if (!success)
122652267790SWillem de Bruijn 		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
122752267790SWillem de Bruijn 
12284ab6c99dSWillem de Bruijn 	q = &sk->sk_error_queue;
12294ab6c99dSWillem de Bruijn 	spin_lock_irqsave(&q->lock, flags);
12304ab6c99dSWillem de Bruijn 	tail = skb_peek_tail(q);
12314ab6c99dSWillem de Bruijn 	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
12324ab6c99dSWillem de Bruijn 	    !skb_zerocopy_notify_extend(tail, lo, len)) {
12334ab6c99dSWillem de Bruijn 		__skb_queue_tail(q, skb);
123452267790SWillem de Bruijn 		skb = NULL;
12354ab6c99dSWillem de Bruijn 	}
12364ab6c99dSWillem de Bruijn 	spin_unlock_irqrestore(&q->lock, flags);
123752267790SWillem de Bruijn 
123852267790SWillem de Bruijn 	sk->sk_error_report(sk);
123952267790SWillem de Bruijn 
124052267790SWillem de Bruijn release:
124152267790SWillem de Bruijn 	consume_skb(skb);
124252267790SWillem de Bruijn 	sock_put(sk);
124352267790SWillem de Bruijn }
124452267790SWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
124552267790SWillem de Bruijn 
124652267790SWillem de Bruijn void sock_zerocopy_put(struct ubuf_info *uarg)
124752267790SWillem de Bruijn {
1248c1d1b437SEric Dumazet 	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
124952267790SWillem de Bruijn 		if (uarg->callback)
125052267790SWillem de Bruijn 			uarg->callback(uarg, uarg->zerocopy);
125152267790SWillem de Bruijn 		else
125252267790SWillem de Bruijn 			consume_skb(skb_from_uarg(uarg));
125352267790SWillem de Bruijn 	}
125452267790SWillem de Bruijn }
125552267790SWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_put);
125652267790SWillem de Bruijn 
125752900d22SWillem de Bruijn void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
125852267790SWillem de Bruijn {
125952267790SWillem de Bruijn 	if (uarg) {
126052267790SWillem de Bruijn 		struct sock *sk = skb_from_uarg(uarg)->sk;
126152267790SWillem de Bruijn 
126252267790SWillem de Bruijn 		atomic_dec(&sk->sk_zckey);
12634ab6c99dSWillem de Bruijn 		uarg->len--;
126452267790SWillem de Bruijn 
126552900d22SWillem de Bruijn 		if (have_uref)
126652267790SWillem de Bruijn 			sock_zerocopy_put(uarg);
126752267790SWillem de Bruijn 	}
126852267790SWillem de Bruijn }
126952267790SWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
127052267790SWillem de Bruijn 
1271b5947e5dSWillem de Bruijn int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
1272b5947e5dSWillem de Bruijn {
1273b5947e5dSWillem de Bruijn 	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1274b5947e5dSWillem de Bruijn }
1275b5947e5dSWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
1276b5947e5dSWillem de Bruijn 
127752267790SWillem de Bruijn int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
127852267790SWillem de Bruijn 			     struct msghdr *msg, int len,
127952267790SWillem de Bruijn 			     struct ubuf_info *uarg)
128052267790SWillem de Bruijn {
12814ab6c99dSWillem de Bruijn 	struct ubuf_info *orig_uarg = skb_zcopy(skb);
128252267790SWillem de Bruijn 	struct iov_iter orig_iter = msg->msg_iter;
128352267790SWillem de Bruijn 	int err, orig_len = skb->len;
128452267790SWillem de Bruijn 
12854ab6c99dSWillem de Bruijn 	/* An skb can only point to one uarg. This edge case happens when
12864ab6c99dSWillem de Bruijn 	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
12874ab6c99dSWillem de Bruijn 	 */
12884ab6c99dSWillem de Bruijn 	if (orig_uarg && uarg != orig_uarg)
12894ab6c99dSWillem de Bruijn 		return -EEXIST;
12904ab6c99dSWillem de Bruijn 
129152267790SWillem de Bruijn 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
129252267790SWillem de Bruijn 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
129354d43117SWillem de Bruijn 		struct sock *save_sk = skb->sk;
129454d43117SWillem de Bruijn 
129552267790SWillem de Bruijn 		/* Streams do not free skb on error. Reset to prev state. */
129652267790SWillem de Bruijn 		msg->msg_iter = orig_iter;
129754d43117SWillem de Bruijn 		skb->sk = sk;
129852267790SWillem de Bruijn 		___pskb_trim(skb, orig_len);
129954d43117SWillem de Bruijn 		skb->sk = save_sk;
130052267790SWillem de Bruijn 		return err;
130152267790SWillem de Bruijn 	}
130252267790SWillem de Bruijn 
130352900d22SWillem de Bruijn 	skb_zcopy_set(skb, uarg, NULL);
130452267790SWillem de Bruijn 	return skb->len - orig_len;
130552267790SWillem de Bruijn }
130652267790SWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
130752267790SWillem de Bruijn 
13081f8b977aSWillem de Bruijn static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
130952267790SWillem de Bruijn 			      gfp_t gfp_mask)
131052267790SWillem de Bruijn {
131152267790SWillem de Bruijn 	if (skb_zcopy(orig)) {
131252267790SWillem de Bruijn 		if (skb_zcopy(nskb)) {
131352267790SWillem de Bruijn 			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
131452267790SWillem de Bruijn 			if (!gfp_mask) {
131552267790SWillem de Bruijn 				WARN_ON_ONCE(1);
131652267790SWillem de Bruijn 				return -ENOMEM;
131752267790SWillem de Bruijn 			}
131852267790SWillem de Bruijn 			if (skb_uarg(nskb) == skb_uarg(orig))
131952267790SWillem de Bruijn 				return 0;
132052267790SWillem de Bruijn 			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
132152267790SWillem de Bruijn 				return -EIO;
132252267790SWillem de Bruijn 		}
132352900d22SWillem de Bruijn 		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
132452267790SWillem de Bruijn 	}
132552267790SWillem de Bruijn 	return 0;
132652267790SWillem de Bruijn }
132752267790SWillem de Bruijn 
13282c53040fSBen Hutchings /**
13292c53040fSBen Hutchings  *	skb_copy_ubufs	-	copy userspace skb frag buffers to kernel
133048c83012SMichael S. Tsirkin  *	@skb: the skb to modify
133148c83012SMichael S. Tsirkin  *	@gfp_mask: allocation priority
133248c83012SMichael S. Tsirkin  *
133348c83012SMichael S. Tsirkin  *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
133448c83012SMichael S. Tsirkin  *	It copies all frags into kernel memory and drops the reference
133548c83012SMichael S. Tsirkin  *	to the userspace pages.
133648c83012SMichael S. Tsirkin  *
133748c83012SMichael S. Tsirkin  *	If this function is called from an interrupt, @gfp_mask must be
133848c83012SMichael S. Tsirkin  *	%GFP_ATOMIC.
133948c83012SMichael S. Tsirkin  *
134048c83012SMichael S. Tsirkin  *	Returns 0 on success or a negative error code on failure
134148c83012SMichael S. Tsirkin  *	to allocate kernel memory to copy to.
134248c83012SMichael S. Tsirkin  */
134348c83012SMichael S. Tsirkin int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1344a6686f2fSShirley Ma {
1345a6686f2fSShirley Ma 	int num_frags = skb_shinfo(skb)->nr_frags;
1346a6686f2fSShirley Ma 	struct page *page, *head = NULL;
13473ece7826SWillem de Bruijn 	int i, new_frags;
13483ece7826SWillem de Bruijn 	u32 d_off;
1349a6686f2fSShirley Ma 
13503ece7826SWillem de Bruijn 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
13513ece7826SWillem de Bruijn 		return -EINVAL;
13523ece7826SWillem de Bruijn 
1353f72c4ac6SWillem de Bruijn 	if (!num_frags)
1354f72c4ac6SWillem de Bruijn 		goto release;
1355f72c4ac6SWillem de Bruijn 
13563ece7826SWillem de Bruijn 	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
13573ece7826SWillem de Bruijn 	for (i = 0; i < new_frags; i++) {
135802756ed4SKrishna Kumar 		page = alloc_page(gfp_mask);
1359a6686f2fSShirley Ma 		if (!page) {
1360a6686f2fSShirley Ma 			while (head) {
136140dadff2SSunghan Suh 				struct page *next = (struct page *)page_private(head);
1362a6686f2fSShirley Ma 				put_page(head);
1363a6686f2fSShirley Ma 				head = next;
1364a6686f2fSShirley Ma 			}
1365a6686f2fSShirley Ma 			return -ENOMEM;
1366a6686f2fSShirley Ma 		}
13673ece7826SWillem de Bruijn 		set_page_private(page, (unsigned long)head);
13683ece7826SWillem de Bruijn 		head = page;
13693ece7826SWillem de Bruijn 	}
13703ece7826SWillem de Bruijn 
13713ece7826SWillem de Bruijn 	page = head;
13723ece7826SWillem de Bruijn 	d_off = 0;
13733ece7826SWillem de Bruijn 	for (i = 0; i < num_frags; i++) {
13743ece7826SWillem de Bruijn 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
13753ece7826SWillem de Bruijn 		u32 p_off, p_len, copied;
13763ece7826SWillem de Bruijn 		struct page *p;
13773ece7826SWillem de Bruijn 		u8 *vaddr;
1378c613c209SWillem de Bruijn 
1379b54c9d5bSJonathan Lemon 		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1380c613c209SWillem de Bruijn 				      p, p_off, p_len, copied) {
13813ece7826SWillem de Bruijn 			u32 copy, done = 0;
1382c613c209SWillem de Bruijn 			vaddr = kmap_atomic(p);
13833ece7826SWillem de Bruijn 
13843ece7826SWillem de Bruijn 			while (done < p_len) {
13853ece7826SWillem de Bruijn 				if (d_off == PAGE_SIZE) {
13863ece7826SWillem de Bruijn 					d_off = 0;
13873ece7826SWillem de Bruijn 					page = (struct page *)page_private(page);
13883ece7826SWillem de Bruijn 				}
13893ece7826SWillem de Bruijn 				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
13903ece7826SWillem de Bruijn 				memcpy(page_address(page) + d_off,
13913ece7826SWillem de Bruijn 				       vaddr + p_off + done, copy);
13923ece7826SWillem de Bruijn 				done += copy;
13933ece7826SWillem de Bruijn 				d_off += copy;
13943ece7826SWillem de Bruijn 			}
139551c56b00SEric Dumazet 			kunmap_atomic(vaddr);
1396c613c209SWillem de Bruijn 		}
1397a6686f2fSShirley Ma 	}
1398a6686f2fSShirley Ma 
1399a6686f2fSShirley Ma 	/* skb frags release userspace buffers */
140002756ed4SKrishna Kumar 	for (i = 0; i < num_frags; i++)
1401a8605c60SIan Campbell 		skb_frag_unref(skb, i);
1402a6686f2fSShirley Ma 
1403a6686f2fSShirley Ma 	/* skb frags point to kernel buffers */
14043ece7826SWillem de Bruijn 	for (i = 0; i < new_frags - 1; i++) {
14053ece7826SWillem de Bruijn 		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
140640dadff2SSunghan Suh 		head = (struct page *)page_private(head);
1407a6686f2fSShirley Ma 	}
14083ece7826SWillem de Bruijn 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
14093ece7826SWillem de Bruijn 	skb_shinfo(skb)->nr_frags = new_frags;
141048c83012SMichael S. Tsirkin 
1411b90ddd56SWillem de Bruijn release:
14121f8b977aSWillem de Bruijn 	skb_zcopy_clear(skb, false);
1413a6686f2fSShirley Ma 	return 0;
1414a6686f2fSShirley Ma }
1415dcc0fb78SMichael S. Tsirkin EXPORT_SYMBOL_GPL(skb_copy_ubufs);
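
/*
 * Usage sketch (illustrative only; the helper is hypothetical): detach a
 * zerocopy skb from its userspace pages before its data may be touched.
 * In-tree callers normally go through skb_orphan_frags() or
 * skb_orphan_frags_rx(), which invoke skb_copy_ubufs() only when needed.
 */
static int example_detach_user_frags(struct sk_buff *skb)
{
        if (!skb_zcopy(skb))
                return 0;

        return skb_copy_ubufs(skb, GFP_ATOMIC);
}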
1416a6686f2fSShirley Ma 
1417e0053ec0SHerbert Xu /**
1418e0053ec0SHerbert Xu  *	skb_clone	-	duplicate an sk_buff
1419e0053ec0SHerbert Xu  *	@skb: buffer to clone
1420e0053ec0SHerbert Xu  *	@gfp_mask: allocation priority
1421e0053ec0SHerbert Xu  *
1422e0053ec0SHerbert Xu  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
1423e0053ec0SHerbert Xu  *	copies share the same packet data but not structure. The new
1424e0053ec0SHerbert Xu  *	buffer has a reference count of 1. If the allocation fails, the
1425e0053ec0SHerbert Xu  *	function returns %NULL; otherwise the new buffer is returned.
1426e0053ec0SHerbert Xu  *
1427e0053ec0SHerbert Xu  *	If this function is called from an interrupt, @gfp_mask must be
1428e0053ec0SHerbert Xu  *	%GFP_ATOMIC.
1429e0053ec0SHerbert Xu  */
1430e0053ec0SHerbert Xu 
1431e0053ec0SHerbert Xu struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1432e0053ec0SHerbert Xu {
1433d0bf4a9eSEric Dumazet 	struct sk_buff_fclones *fclones = container_of(skb,
1434d0bf4a9eSEric Dumazet 						       struct sk_buff_fclones,
1435d0bf4a9eSEric Dumazet 						       skb1);
14366ffe75ebSEric Dumazet 	struct sk_buff *n;
1437e0053ec0SHerbert Xu 
143870008aa5SMichael S. Tsirkin 	if (skb_orphan_frags(skb, gfp_mask))
1439a6686f2fSShirley Ma 		return NULL;
1440a6686f2fSShirley Ma 
1441e0053ec0SHerbert Xu 	if (skb->fclone == SKB_FCLONE_ORIG &&
14422638595aSReshetova, Elena 	    refcount_read(&fclones->fclone_ref) == 1) {
14436ffe75ebSEric Dumazet 		n = &fclones->skb2;
14442638595aSReshetova, Elena 		refcount_set(&fclones->fclone_ref, 2);
1445e0053ec0SHerbert Xu 	} else {
1446c93bdd0eSMel Gorman 		if (skb_pfmemalloc(skb))
1447c93bdd0eSMel Gorman 			gfp_mask |= __GFP_MEMALLOC;
1448c93bdd0eSMel Gorman 
1449e0053ec0SHerbert Xu 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1450e0053ec0SHerbert Xu 		if (!n)
1451e0053ec0SHerbert Xu 			return NULL;
1452fe55f6d5SVegard Nossum 
1453e0053ec0SHerbert Xu 		n->fclone = SKB_FCLONE_UNAVAILABLE;
1454e0053ec0SHerbert Xu 	}
1455e0053ec0SHerbert Xu 
1456e0053ec0SHerbert Xu 	return __skb_clone(n, skb);
14571da177e4SLinus Torvalds }
1458b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_clone);
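
/*
 * Usage sketch (illustrative only; example_mirror_xmit and @mirror_dev are
 * hypothetical): transmit a clone on a second device while the caller keeps
 * the original.  The clone shares the packet data, so neither copy may
 * modify it without copying first.
 */
static int example_mirror_xmit(struct sk_buff *skb,
                               struct net_device *mirror_dev)
{
        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

        if (!clone)
                return -ENOMEM;

        clone->dev = mirror_dev;
        return dev_queue_xmit(clone);
}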
14591da177e4SLinus Torvalds 
1460b0768a86SToshiaki Makita void skb_headers_offset_update(struct sk_buff *skb, int off)
1461f5b17294SPravin B Shelar {
1462030737bcSEric Dumazet 	/* Only adjust this if it actually is csum_start rather than csum */
1463030737bcSEric Dumazet 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1464030737bcSEric Dumazet 		skb->csum_start += off;
1465f5b17294SPravin B Shelar 	/* {transport,network,mac}_header and tail are relative to skb->head */
1466f5b17294SPravin B Shelar 	skb->transport_header += off;
1467f5b17294SPravin B Shelar 	skb->network_header   += off;
1468f5b17294SPravin B Shelar 	if (skb_mac_header_was_set(skb))
1469f5b17294SPravin B Shelar 		skb->mac_header += off;
1470f5b17294SPravin B Shelar 	skb->inner_transport_header += off;
1471f5b17294SPravin B Shelar 	skb->inner_network_header += off;
1472aefbd2b3SPravin B Shelar 	skb->inner_mac_header += off;
1473f5b17294SPravin B Shelar }
1474b0768a86SToshiaki Makita EXPORT_SYMBOL(skb_headers_offset_update);
1475f5b17294SPravin B Shelar 
147608303c18SIlya Lesokhin void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
14771da177e4SLinus Torvalds {
1478dec18810SHerbert Xu 	__copy_skb_header(new, old);
1479dec18810SHerbert Xu 
14807967168cSHerbert Xu 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
14817967168cSHerbert Xu 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
14827967168cSHerbert Xu 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
14831da177e4SLinus Torvalds }
148408303c18SIlya Lesokhin EXPORT_SYMBOL(skb_copy_header);
14851da177e4SLinus Torvalds 
1486c93bdd0eSMel Gorman static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1487c93bdd0eSMel Gorman {
1488c93bdd0eSMel Gorman 	if (skb_pfmemalloc(skb))
1489c93bdd0eSMel Gorman 		return SKB_ALLOC_RX;
1490c93bdd0eSMel Gorman 	return 0;
1491c93bdd0eSMel Gorman }
1492c93bdd0eSMel Gorman 
14931da177e4SLinus Torvalds /**
14941da177e4SLinus Torvalds  *	skb_copy	-	create private copy of an sk_buff
14951da177e4SLinus Torvalds  *	@skb: buffer to copy
14961da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
14971da177e4SLinus Torvalds  *
14981da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and its data. This is used when the
14991da177e4SLinus Torvalds  *	caller wishes to modify the data and needs a private copy of the
15001da177e4SLinus Torvalds  *	data to alter. Returns %NULL on failure or the pointer to the buffer
15011da177e4SLinus Torvalds  *	on success. The returned buffer has a reference count of 1.
15021da177e4SLinus Torvalds  *
15031da177e4SLinus Torvalds  *	As a by-product this function converts a non-linear &sk_buff to a
15041da177e4SLinus Torvalds  *	linear one, so that the &sk_buff becomes completely private and the
15051da177e4SLinus Torvalds  *	caller is allowed to modify all the data of the returned buffer. This
15061da177e4SLinus Torvalds  *	means that this function is not recommended for use in circumstances
15071da177e4SLinus Torvalds  *	when only the header is going to be modified. Use pskb_copy() instead.
15081da177e4SLinus Torvalds  */
15091da177e4SLinus Torvalds 
1510dd0fc66fSAl Viro struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
15111da177e4SLinus Torvalds {
15126602cebbSEric Dumazet 	int headerlen = skb_headroom(skb);
1513ec47ea82SAlexander Duyck 	unsigned int size = skb_end_offset(skb) + skb->data_len;
1514c93bdd0eSMel Gorman 	struct sk_buff *n = __alloc_skb(size, gfp_mask,
1515c93bdd0eSMel Gorman 					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
15166602cebbSEric Dumazet 
15171da177e4SLinus Torvalds 	if (!n)
15181da177e4SLinus Torvalds 		return NULL;
15191da177e4SLinus Torvalds 
15201da177e4SLinus Torvalds 	/* Set the data pointer */
15211da177e4SLinus Torvalds 	skb_reserve(n, headerlen);
15221da177e4SLinus Torvalds 	/* Set the tail pointer and length */
15231da177e4SLinus Torvalds 	skb_put(n, skb->len);
15241da177e4SLinus Torvalds 
15259f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
15261da177e4SLinus Torvalds 
152708303c18SIlya Lesokhin 	skb_copy_header(n, skb);
15281da177e4SLinus Torvalds 	return n;
15291da177e4SLinus Torvalds }
1530b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy);
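
/*
 * Usage sketch (illustrative only; the helper and the byte it rewrites are
 * hypothetical): take a fully private, linear copy before modifying payload
 * bytes of a buffer that may be cloned or non-linear.
 */
static struct sk_buff *example_private_rewrite(const struct sk_buff *skb)
{
        struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

        if (!copy)
                return NULL;

        /* safe: copy owns both its header and its (now linear) data */
        if (copy->len)
                copy->data[0] ^= 0xff;
        return copy;
}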
15311da177e4SLinus Torvalds 
15321da177e4SLinus Torvalds /**
1533bad93e9dSOctavian Purdila  *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
15341da177e4SLinus Torvalds  *	@skb: buffer to copy
1535117632e6SEric Dumazet  *	@headroom: headroom of new skb
15361da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
1537bad93e9dSOctavian Purdila  *	@fclone: if true allocate the copy of the skb from the fclone
1538bad93e9dSOctavian Purdila  *	cache instead of the head cache; it is recommended to set this
1539bad93e9dSOctavian Purdila  *	to true for the cases where the copy will likely be cloned
15401da177e4SLinus Torvalds  *
15411da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and part of its data, located
15421da177e4SLinus Torvalds  *	in the header. Fragmented data remain shared. This is used when
15431da177e4SLinus Torvalds  *	the caller wishes to modify only the header of the &sk_buff and needs
15441da177e4SLinus Torvalds  *	a private copy of the header to alter. Returns %NULL on failure
15451da177e4SLinus Torvalds  *	or the pointer to the buffer on success.
15461da177e4SLinus Torvalds  *	The returned buffer has a reference count of 1.
15471da177e4SLinus Torvalds  */
15481da177e4SLinus Torvalds 
1549bad93e9dSOctavian Purdila struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1550bad93e9dSOctavian Purdila 				   gfp_t gfp_mask, bool fclone)
15511da177e4SLinus Torvalds {
1552117632e6SEric Dumazet 	unsigned int size = skb_headlen(skb) + headroom;
1553bad93e9dSOctavian Purdila 	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1554bad93e9dSOctavian Purdila 	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
15556602cebbSEric Dumazet 
15561da177e4SLinus Torvalds 	if (!n)
15571da177e4SLinus Torvalds 		goto out;
15581da177e4SLinus Torvalds 
15591da177e4SLinus Torvalds 	/* Set the data pointer */
1560117632e6SEric Dumazet 	skb_reserve(n, headroom);
15611da177e4SLinus Torvalds 	/* Set the tail pointer and length */
15621da177e4SLinus Torvalds 	skb_put(n, skb_headlen(skb));
15631da177e4SLinus Torvalds 	/* Copy the bytes */
1564d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data(skb, n->data, n->len);
15651da177e4SLinus Torvalds 
156625f484a6SHerbert Xu 	n->truesize += skb->data_len;
15671da177e4SLinus Torvalds 	n->data_len  = skb->data_len;
15681da177e4SLinus Torvalds 	n->len	     = skb->len;
15691da177e4SLinus Torvalds 
15701da177e4SLinus Torvalds 	if (skb_shinfo(skb)->nr_frags) {
15711da177e4SLinus Torvalds 		int i;
15721da177e4SLinus Torvalds 
15731f8b977aSWillem de Bruijn 		if (skb_orphan_frags(skb, gfp_mask) ||
15741f8b977aSWillem de Bruijn 		    skb_zerocopy_clone(n, skb, gfp_mask)) {
15751511022cSDan Carpenter 			kfree_skb(n);
15761511022cSDan Carpenter 			n = NULL;
1577a6686f2fSShirley Ma 			goto out;
1578a6686f2fSShirley Ma 		}
15791da177e4SLinus Torvalds 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
15801da177e4SLinus Torvalds 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1581ea2ab693SIan Campbell 			skb_frag_ref(skb, i);
15821da177e4SLinus Torvalds 		}
15831da177e4SLinus Torvalds 		skb_shinfo(n)->nr_frags = i;
15841da177e4SLinus Torvalds 	}
15851da177e4SLinus Torvalds 
158621dc3301SDavid S. Miller 	if (skb_has_frag_list(skb)) {
15871da177e4SLinus Torvalds 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
15881da177e4SLinus Torvalds 		skb_clone_fraglist(n);
15891da177e4SLinus Torvalds 	}
15901da177e4SLinus Torvalds 
159108303c18SIlya Lesokhin 	skb_copy_header(n, skb);
15921da177e4SLinus Torvalds out:
15931da177e4SLinus Torvalds 	return n;
15941da177e4SLinus Torvalds }
1595bad93e9dSOctavian Purdila EXPORT_SYMBOL(__pskb_copy_fclone);
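
/*
 * Usage sketch (illustrative only; the helper is hypothetical): when only
 * protocol headers in the linear area need to be rewritten, pskb_copy()
 * provides a private head while the paged fragments remain shared.
 */
static struct sk_buff *example_copy_for_header_edit(struct sk_buff *skb)
{
        struct sk_buff *copy = pskb_copy(skb, GFP_ATOMIC);

        if (!copy)
                return NULL;

        /* linear header bytes are now private; frags are still shared */
        if (skb_headlen(copy))
                copy->data[0] = 0;
        return copy;
}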
15961da177e4SLinus Torvalds 
15971da177e4SLinus Torvalds /**
15981da177e4SLinus Torvalds  *	pskb_expand_head - reallocate header of &sk_buff
15991da177e4SLinus Torvalds  *	@skb: buffer to reallocate
16001da177e4SLinus Torvalds  *	@nhead: room to add at head
16011da177e4SLinus Torvalds  *	@ntail: room to add at tail
16021da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
16031da177e4SLinus Torvalds  *
1604bc32383cSMathias Krause  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
1605bc32383cSMathias Krause  *	the header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
16061da177e4SLinus Torvalds  *	a reference count of 1. Returns zero on success or a negative error
16071da177e4SLinus Torvalds  *	code if expansion failed. In the latter case, &sk_buff is not changed.
16081da177e4SLinus Torvalds  *
16091da177e4SLinus Torvalds  *	All the pointers pointing into skb header may change and must be
16101da177e4SLinus Torvalds  *	reloaded after call to this function.
16111da177e4SLinus Torvalds  */
16121da177e4SLinus Torvalds 
161386a76cafSVictor Fusco int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1614dd0fc66fSAl Viro 		     gfp_t gfp_mask)
16151da177e4SLinus Torvalds {
1616158f323bSEric Dumazet 	int i, osize = skb_end_offset(skb);
1617158f323bSEric Dumazet 	int size = osize + nhead + ntail;
16181da177e4SLinus Torvalds 	long off;
1619158f323bSEric Dumazet 	u8 *data;
16201da177e4SLinus Torvalds 
16214edd87adSHerbert Xu 	BUG_ON(nhead < 0);
16224edd87adSHerbert Xu 
16239f77fad3STim Hansen 	BUG_ON(skb_shared(skb));
16241da177e4SLinus Torvalds 
16251da177e4SLinus Torvalds 	size = SKB_DATA_ALIGN(size);
16261da177e4SLinus Torvalds 
1627c93bdd0eSMel Gorman 	if (skb_pfmemalloc(skb))
1628c93bdd0eSMel Gorman 		gfp_mask |= __GFP_MEMALLOC;
1629c93bdd0eSMel Gorman 	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1630c93bdd0eSMel Gorman 			       gfp_mask, NUMA_NO_NODE, NULL);
16311da177e4SLinus Torvalds 	if (!data)
16321da177e4SLinus Torvalds 		goto nodata;
163387151b86SEric Dumazet 	size = SKB_WITH_OVERHEAD(ksize(data));
16341da177e4SLinus Torvalds 
16351da177e4SLinus Torvalds 	/* Copy only real data... and, alas, header. This should be
16366602cebbSEric Dumazet 	 * optimized for the cases when header is void.
16376602cebbSEric Dumazet 	 */
16386602cebbSEric Dumazet 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
16396602cebbSEric Dumazet 
16406602cebbSEric Dumazet 	memcpy((struct skb_shared_info *)(data + size),
16416602cebbSEric Dumazet 	       skb_shinfo(skb),
1642fed66381SEric Dumazet 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
16431da177e4SLinus Torvalds 
16443e24591aSAlexander Duyck 	/*
16453e24591aSAlexander Duyck 	 * if shinfo is shared we must drop the old head gracefully, but if it
16463e24591aSAlexander Duyck 	 * is not we can just drop the old head and let the existing refcount
16473e24591aSAlexander Duyck 	 * be, since all we did is relocate the values
16483e24591aSAlexander Duyck 	 */
16493e24591aSAlexander Duyck 	if (skb_cloned(skb)) {
165070008aa5SMichael S. Tsirkin 		if (skb_orphan_frags(skb, gfp_mask))
1651a6686f2fSShirley Ma 			goto nofrags;
16521f8b977aSWillem de Bruijn 		if (skb_zcopy(skb))
1653c1d1b437SEric Dumazet 			refcount_inc(&skb_uarg(skb)->refcnt);
16541da177e4SLinus Torvalds 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1655ea2ab693SIan Campbell 			skb_frag_ref(skb, i);
16561da177e4SLinus Torvalds 
165721dc3301SDavid S. Miller 		if (skb_has_frag_list(skb))
16581da177e4SLinus Torvalds 			skb_clone_fraglist(skb);
16591da177e4SLinus Torvalds 
16601da177e4SLinus Torvalds 		skb_release_data(skb);
16613e24591aSAlexander Duyck 	} else {
16623e24591aSAlexander Duyck 		skb_free_head(skb);
16631fd63041SEric Dumazet 	}
16641da177e4SLinus Torvalds 	off = (data + nhead) - skb->head;
16651da177e4SLinus Torvalds 
16661da177e4SLinus Torvalds 	skb->head     = data;
1667d3836f21SEric Dumazet 	skb->head_frag = 0;
16681da177e4SLinus Torvalds 	skb->data    += off;
16694305b541SArnaldo Carvalho de Melo #ifdef NET_SKBUFF_DATA_USES_OFFSET
16704305b541SArnaldo Carvalho de Melo 	skb->end      = size;
167156eb8882SPatrick McHardy 	off           = nhead;
16724305b541SArnaldo Carvalho de Melo #else
16734305b541SArnaldo Carvalho de Melo 	skb->end      = skb->head + size;
167456eb8882SPatrick McHardy #endif
167527a884dcSArnaldo Carvalho de Melo 	skb->tail	      += off;
1676b41abb42SPeter Pan(潘卫平) 	skb_headers_offset_update(skb, nhead);
16771da177e4SLinus Torvalds 	skb->cloned   = 0;
1678334a8132SPatrick McHardy 	skb->hdr_len  = 0;
16791da177e4SLinus Torvalds 	skb->nohdr    = 0;
16801da177e4SLinus Torvalds 	atomic_set(&skb_shinfo(skb)->dataref, 1);
1681158f323bSEric Dumazet 
1682de8f3a83SDaniel Borkmann 	skb_metadata_clear(skb);
1683de8f3a83SDaniel Borkmann 
1684158f323bSEric Dumazet 	/* It is not generally safe to change skb->truesize.
1685158f323bSEric Dumazet 	 * For the moment, we only really care about the rx path, or
1686158f323bSEric Dumazet 	 * the case when the skb is orphaned (not attached to a socket).
1687158f323bSEric Dumazet 	 */
1688158f323bSEric Dumazet 	if (!skb->sk || skb->destructor == sock_edemux)
1689158f323bSEric Dumazet 		skb->truesize += size - osize;
1690158f323bSEric Dumazet 
16911da177e4SLinus Torvalds 	return 0;
16921da177e4SLinus Torvalds 
1693a6686f2fSShirley Ma nofrags:
1694a6686f2fSShirley Ma 	kfree(data);
16951da177e4SLinus Torvalds nodata:
16961da177e4SLinus Torvalds 	return -ENOMEM;
16971da177e4SLinus Torvalds }
1698b4ac530fSDavid S. Miller EXPORT_SYMBOL(pskb_expand_head);
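
/*
 * Usage sketch (illustrative only; EXAMPLE_TUNNEL_HLEN and the helper are
 * hypothetical): grow the headroom of an unshared skb before pushing an
 * encapsulation header.  As documented above, every cached pointer into the
 * skb header must be re-read after a successful call.
 */
#define EXAMPLE_TUNNEL_HLEN	20

static int example_make_tunnel_room(struct sk_buff *skb)
{
        if (skb_headroom(skb) >= EXAMPLE_TUNNEL_HLEN && !skb_cloned(skb))
                return 0;

        return pskb_expand_head(skb, SKB_DATA_ALIGN(EXAMPLE_TUNNEL_HLEN), 0,
                                GFP_ATOMIC);
}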
16991da177e4SLinus Torvalds 
17001da177e4SLinus Torvalds /* Make private copy of skb with writable head and some headroom */
17011da177e4SLinus Torvalds 
17021da177e4SLinus Torvalds struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
17031da177e4SLinus Torvalds {
17041da177e4SLinus Torvalds 	struct sk_buff *skb2;
17051da177e4SLinus Torvalds 	int delta = headroom - skb_headroom(skb);
17061da177e4SLinus Torvalds 
17071da177e4SLinus Torvalds 	if (delta <= 0)
17081da177e4SLinus Torvalds 		skb2 = pskb_copy(skb, GFP_ATOMIC);
17091da177e4SLinus Torvalds 	else {
17101da177e4SLinus Torvalds 		skb2 = skb_clone(skb, GFP_ATOMIC);
17111da177e4SLinus Torvalds 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
17121da177e4SLinus Torvalds 					     GFP_ATOMIC)) {
17131da177e4SLinus Torvalds 			kfree_skb(skb2);
17141da177e4SLinus Torvalds 			skb2 = NULL;
17151da177e4SLinus Torvalds 		}
17161da177e4SLinus Torvalds 	}
17171da177e4SLinus Torvalds 	return skb2;
17181da177e4SLinus Torvalds }
1719b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_realloc_headroom);
17201da177e4SLinus Torvalds 
17211da177e4SLinus Torvalds /**
17221da177e4SLinus Torvalds  *	skb_copy_expand	-	copy and expand sk_buff
17231da177e4SLinus Torvalds  *	@skb: buffer to copy
17241da177e4SLinus Torvalds  *	@newheadroom: new free bytes at head
17251da177e4SLinus Torvalds  *	@newtailroom: new free bytes at tail
17261da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
17271da177e4SLinus Torvalds  *
17281da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and its data and while doing so
17291da177e4SLinus Torvalds  *	allocate additional space.
17301da177e4SLinus Torvalds  *
17311da177e4SLinus Torvalds  *	This is used when the caller wishes to modify the data and needs a
17321da177e4SLinus Torvalds  *	private copy of the data to alter as well as more space for new fields.
17331da177e4SLinus Torvalds  *	Returns %NULL on failure or the pointer to the buffer
17341da177e4SLinus Torvalds  *	on success. The returned buffer has a reference count of 1.
17351da177e4SLinus Torvalds  *
17361da177e4SLinus Torvalds  *	You must pass %GFP_ATOMIC as the allocation priority if this function
17371da177e4SLinus Torvalds  *	is called from an interrupt.
17381da177e4SLinus Torvalds  */
17391da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
174086a76cafSVictor Fusco 				int newheadroom, int newtailroom,
1741dd0fc66fSAl Viro 				gfp_t gfp_mask)
17421da177e4SLinus Torvalds {
17431da177e4SLinus Torvalds 	/*
17441da177e4SLinus Torvalds 	 *	Allocate the copy buffer
17451da177e4SLinus Torvalds 	 */
1746c93bdd0eSMel Gorman 	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1747c93bdd0eSMel Gorman 					gfp_mask, skb_alloc_rx_flag(skb),
1748c93bdd0eSMel Gorman 					NUMA_NO_NODE);
1749efd1e8d5SPatrick McHardy 	int oldheadroom = skb_headroom(skb);
17501da177e4SLinus Torvalds 	int head_copy_len, head_copy_off;
17511da177e4SLinus Torvalds 
17521da177e4SLinus Torvalds 	if (!n)
17531da177e4SLinus Torvalds 		return NULL;
17541da177e4SLinus Torvalds 
17551da177e4SLinus Torvalds 	skb_reserve(n, newheadroom);
17561da177e4SLinus Torvalds 
17571da177e4SLinus Torvalds 	/* Set the tail pointer and length */
17581da177e4SLinus Torvalds 	skb_put(n, skb->len);
17591da177e4SLinus Torvalds 
1760efd1e8d5SPatrick McHardy 	head_copy_len = oldheadroom;
17611da177e4SLinus Torvalds 	head_copy_off = 0;
17621da177e4SLinus Torvalds 	if (newheadroom <= head_copy_len)
17631da177e4SLinus Torvalds 		head_copy_len = newheadroom;
17641da177e4SLinus Torvalds 	else
17651da177e4SLinus Torvalds 		head_copy_off = newheadroom - head_copy_len;
17661da177e4SLinus Torvalds 
17671da177e4SLinus Torvalds 	/* Copy the linear header and data. */
17689f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
17699f77fad3STim Hansen 			     skb->len + head_copy_len));
17701da177e4SLinus Torvalds 
177108303c18SIlya Lesokhin 	skb_copy_header(n, skb);
17721da177e4SLinus Torvalds 
1773030737bcSEric Dumazet 	skb_headers_offset_update(n, newheadroom - oldheadroom);
1774efd1e8d5SPatrick McHardy 
17751da177e4SLinus Torvalds 	return n;
17761da177e4SLinus Torvalds }
1777b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_expand);
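
/*
 * Usage sketch (illustrative only; the helper and the 16/32 byte amounts are
 * hypothetical): make a private copy that also reserves extra headroom for a
 * header to be pushed and extra tailroom for a trailer appended later.
 */
static struct sk_buff *example_copy_with_room(const struct sk_buff *skb)
{
        return skb_copy_expand(skb, skb_headroom(skb) + 16, 32, GFP_ATOMIC);
}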
17781da177e4SLinus Torvalds 
17791da177e4SLinus Torvalds /**
1780cd0a137aSFlorian Fainelli  *	__skb_pad		-	zero pad the tail of an skb
17811da177e4SLinus Torvalds  *	@skb: buffer to pad
17821da177e4SLinus Torvalds  *	@pad: space to pad
1783cd0a137aSFlorian Fainelli  *	@free_on_error: free buffer on error
17841da177e4SLinus Torvalds  *
17851da177e4SLinus Torvalds  *	Ensure that a buffer is followed by a padding area that is zero
17861da177e4SLinus Torvalds  *	filled. Used by network drivers which may DMA or transfer data
17871da177e4SLinus Torvalds  *	beyond the buffer end onto the wire.
17881da177e4SLinus Torvalds  *
1789cd0a137aSFlorian Fainelli  *	May return an error in out-of-memory cases. The skb is freed on error
1790cd0a137aSFlorian Fainelli  *	if @free_on_error is true.
17911da177e4SLinus Torvalds  */
17921da177e4SLinus Torvalds 
1793cd0a137aSFlorian Fainelli int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
17941da177e4SLinus Torvalds {
17955b057c6bSHerbert Xu 	int err;
17965b057c6bSHerbert Xu 	int ntail;
17971da177e4SLinus Torvalds 
17981da177e4SLinus Torvalds 	/* If the skbuff is non linear tailroom is always zero.. */
17995b057c6bSHerbert Xu 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
18001da177e4SLinus Torvalds 		memset(skb->data+skb->len, 0, pad);
18015b057c6bSHerbert Xu 		return 0;
18021da177e4SLinus Torvalds 	}
18031da177e4SLinus Torvalds 
18044305b541SArnaldo Carvalho de Melo 	ntail = skb->data_len + pad - (skb->end - skb->tail);
18055b057c6bSHerbert Xu 	if (likely(skb_cloned(skb) || ntail > 0)) {
18065b057c6bSHerbert Xu 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
18075b057c6bSHerbert Xu 		if (unlikely(err))
18085b057c6bSHerbert Xu 			goto free_skb;
18095b057c6bSHerbert Xu 	}
18105b057c6bSHerbert Xu 
18115b057c6bSHerbert Xu 	/* FIXME: The use of this function with non-linear skb's really needs
18125b057c6bSHerbert Xu 	 * to be audited.
18135b057c6bSHerbert Xu 	 */
18145b057c6bSHerbert Xu 	err = skb_linearize(skb);
18155b057c6bSHerbert Xu 	if (unlikely(err))
18165b057c6bSHerbert Xu 		goto free_skb;
18175b057c6bSHerbert Xu 
18185b057c6bSHerbert Xu 	memset(skb->data + skb->len, 0, pad);
18195b057c6bSHerbert Xu 	return 0;
18205b057c6bSHerbert Xu 
18215b057c6bSHerbert Xu free_skb:
1822cd0a137aSFlorian Fainelli 	if (free_on_error)
18231da177e4SLinus Torvalds 		kfree_skb(skb);
18245b057c6bSHerbert Xu 	return err;
18251da177e4SLinus Torvalds }
1826cd0a137aSFlorian Fainelli EXPORT_SYMBOL(__skb_pad);
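
/*
 * Usage sketch (illustrative only; the helper is hypothetical): drivers
 * normally use the skb_pad()/skb_put_padto() wrappers rather than calling
 * __skb_pad() directly, e.g. to zero-pad a frame to the minimum Ethernet
 * length before handing it to hardware.
 */
static int example_pad_to_min_eth_len(struct sk_buff *skb)
{
        /* frees the skb on failure, so the caller must not reuse it on error */
        return skb_put_padto(skb, ETH_ZLEN);
}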
18271da177e4SLinus Torvalds 
18280dde3e16SIlpo Järvinen /**
18290c7ddf36SMathias Krause  *	pskb_put - add data to the tail of a potentially fragmented buffer
18300c7ddf36SMathias Krause  *	@skb: start of the buffer to use
18310c7ddf36SMathias Krause  *	@tail: tail fragment of the buffer to use
18320c7ddf36SMathias Krause  *	@len: amount of data to add
18330c7ddf36SMathias Krause  *
18340c7ddf36SMathias Krause  *	This function extends the used data area of the potentially
18350c7ddf36SMathias Krause  *	fragmented buffer. @tail must be the last fragment of @skb -- or
18360c7ddf36SMathias Krause  *	@skb itself. If this would exceed the total buffer size the kernel
18370c7ddf36SMathias Krause  *	will panic. A pointer to the first byte of the extra data is
18380c7ddf36SMathias Krause  *	returned.
18390c7ddf36SMathias Krause  */
18400c7ddf36SMathias Krause 
18414df864c1SJohannes Berg void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
18420c7ddf36SMathias Krause {
18430c7ddf36SMathias Krause 	if (tail != skb) {
18440c7ddf36SMathias Krause 		skb->data_len += len;
18450c7ddf36SMathias Krause 		skb->len += len;
18460c7ddf36SMathias Krause 	}
18470c7ddf36SMathias Krause 	return skb_put(tail, len);
18480c7ddf36SMathias Krause }
18490c7ddf36SMathias Krause EXPORT_SYMBOL_GPL(pskb_put);
18500c7ddf36SMathias Krause 
18510c7ddf36SMathias Krause /**
18520dde3e16SIlpo Järvinen  *	skb_put - add data to a buffer
18530dde3e16SIlpo Järvinen  *	@skb: buffer to use
18540dde3e16SIlpo Järvinen  *	@len: amount of data to add
18550dde3e16SIlpo Järvinen  *
18560dde3e16SIlpo Järvinen  *	This function extends the used data area of the buffer. If this would
18570dde3e16SIlpo Järvinen  *	exceed the total buffer size the kernel will panic. A pointer to the
18580dde3e16SIlpo Järvinen  *	first byte of the extra data is returned.
18590dde3e16SIlpo Järvinen  */
18604df864c1SJohannes Berg void *skb_put(struct sk_buff *skb, unsigned int len)
18610dde3e16SIlpo Järvinen {
18624df864c1SJohannes Berg 	void *tmp = skb_tail_pointer(skb);
18630dde3e16SIlpo Järvinen 	SKB_LINEAR_ASSERT(skb);
18640dde3e16SIlpo Järvinen 	skb->tail += len;
18650dde3e16SIlpo Järvinen 	skb->len  += len;
18660dde3e16SIlpo Järvinen 	if (unlikely(skb->tail > skb->end))
18670dde3e16SIlpo Järvinen 		skb_over_panic(skb, len, __builtin_return_address(0));
18680dde3e16SIlpo Järvinen 	return tmp;
18690dde3e16SIlpo Järvinen }
18700dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put);
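
/*
 * Usage sketch (illustrative only; the helper and its payload are
 * hypothetical): allocate a buffer, reserve headroom for lower-layer
 * headers, then append the payload with skb_put_data(), which wraps
 * skb_put() plus a memcpy.
 */
static struct sk_buff *example_build_payload(const void *payload,
                                             unsigned int len)
{
        struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_ATOMIC);

        if (!skb)
                return NULL;

        skb_reserve(skb, ETH_HLEN);	/* room for an Ethernet header later */
        skb_put_data(skb, payload, len);
        return skb;
}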
18710dde3e16SIlpo Järvinen 
18726be8ac2fSIlpo Järvinen /**
1873c2aa270aSIlpo Järvinen  *	skb_push - add data to the start of a buffer
1874c2aa270aSIlpo Järvinen  *	@skb: buffer to use
1875c2aa270aSIlpo Järvinen  *	@len: amount of data to add
1876c2aa270aSIlpo Järvinen  *
1877c2aa270aSIlpo Järvinen  *	This function extends the used data area of the buffer at the buffer
1878c2aa270aSIlpo Järvinen  *	start. If this would exceed the total buffer headroom the kernel will
1879c2aa270aSIlpo Järvinen  *	panic. A pointer to the first byte of the extra data is returned.
1880c2aa270aSIlpo Järvinen  */
1881d58ff351SJohannes Berg void *skb_push(struct sk_buff *skb, unsigned int len)
1882c2aa270aSIlpo Järvinen {
1883c2aa270aSIlpo Järvinen 	skb->data -= len;
1884c2aa270aSIlpo Järvinen 	skb->len  += len;
1885c2aa270aSIlpo Järvinen 	if (unlikely(skb->data < skb->head))
1886c2aa270aSIlpo Järvinen 		skb_under_panic(skb, len, __builtin_return_address(0));
1887c2aa270aSIlpo Järvinen 	return skb->data;
1888c2aa270aSIlpo Järvinen }
1889c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push);
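
/*
 * Usage sketch (illustrative only; the helper and its address arguments are
 * hypothetical): prepend an Ethernet header to a buffer that already has
 * ETH_HLEN bytes of headroom, e.g. one built as in the skb_put() example.
 */
static void example_push_eth_header(struct sk_buff *skb, const u8 *dst,
                                    const u8 *src, __be16 proto)
{
        struct ethhdr *eth = skb_push(skb, ETH_HLEN);

        memcpy(eth->h_dest, dst, ETH_ALEN);
        memcpy(eth->h_source, src, ETH_ALEN);
        eth->h_proto = proto;
        skb_reset_mac_header(skb);
}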
1890c2aa270aSIlpo Järvinen 
1891c2aa270aSIlpo Järvinen /**
18926be8ac2fSIlpo Järvinen  *	skb_pull - remove data from the start of a buffer
18936be8ac2fSIlpo Järvinen  *	@skb: buffer to use
18946be8ac2fSIlpo Järvinen  *	@len: amount of data to remove
18956be8ac2fSIlpo Järvinen  *
18966be8ac2fSIlpo Järvinen  *	This function removes data from the start of a buffer, returning
18976be8ac2fSIlpo Järvinen  *	the memory to the headroom. A pointer to the next data in the buffer
18986be8ac2fSIlpo Järvinen  *	is returned. Once the data has been pulled future pushes will overwrite
18996be8ac2fSIlpo Järvinen  *	the old data.
19006be8ac2fSIlpo Järvinen  */
1901af72868bSJohannes Berg void *skb_pull(struct sk_buff *skb, unsigned int len)
19026be8ac2fSIlpo Järvinen {
190347d29646SDavid S. Miller 	return skb_pull_inline(skb, len);
19046be8ac2fSIlpo Järvinen }
19056be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull);
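
/*
 * Usage sketch (illustrative only; the helper is hypothetical): strip an
 * Ethernet header so that skb->data points at the network header.  The
 * pskb_may_pull() check ensures the bytes are present in the linear area
 * before they are pulled.
 */
static void *example_pull_eth_header(struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, ETH_HLEN))
                return NULL;

        skb_reset_mac_header(skb);
        return skb_pull(skb, ETH_HLEN);
}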
19066be8ac2fSIlpo Järvinen 
1907419ae74eSIlpo Järvinen /**
1908419ae74eSIlpo Järvinen  *	skb_trim - remove end from a buffer
1909419ae74eSIlpo Järvinen  *	@skb: buffer to alter
1910419ae74eSIlpo Järvinen  *	@len: new length
1911419ae74eSIlpo Järvinen  *
1912419ae74eSIlpo Järvinen  *	Cut the length of a buffer down by removing data from the tail. If
1913419ae74eSIlpo Järvinen  *	the buffer is already under the length specified it is not modified.
1914419ae74eSIlpo Järvinen  *	The skb must be linear.
1915419ae74eSIlpo Järvinen  */
1916419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len)
1917419ae74eSIlpo Järvinen {
1918419ae74eSIlpo Järvinen 	if (skb->len > len)
1919419ae74eSIlpo Järvinen 		__skb_trim(skb, len);
1920419ae74eSIlpo Järvinen }
1921419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim);
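
/*
 * Usage sketch (illustrative only; the helper is hypothetical): drop
 * trailing padding from a linear buffer once the real datagram length is
 * known; a possibly fragmented buffer should use pskb_trim() instead.
 */
static void example_strip_trailing_pad(struct sk_buff *skb,
                                       unsigned int real_len)
{
        if (!skb_is_nonlinear(skb))
                skb_trim(skb, real_len);
}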
1922419ae74eSIlpo Järvinen 
19233cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers.
19241da177e4SLinus Torvalds  */
19251da177e4SLinus Torvalds 
19263cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len)
19271da177e4SLinus Torvalds {
192827b437c8SHerbert Xu 	struct sk_buff **fragp;
192927b437c8SHerbert Xu 	struct sk_buff *frag;
19301da177e4SLinus Torvalds 	int offset = skb_headlen(skb);
19311da177e4SLinus Torvalds 	int nfrags = skb_shinfo(skb)->nr_frags;
19321da177e4SLinus Torvalds 	int i;
193327b437c8SHerbert Xu 	int err;
193427b437c8SHerbert Xu 
193527b437c8SHerbert Xu 	if (skb_cloned(skb) &&
193627b437c8SHerbert Xu 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
193727b437c8SHerbert Xu 		return err;
19381da177e4SLinus Torvalds 
1939f4d26fb3SHerbert Xu 	i = 0;
1940f4d26fb3SHerbert Xu 	if (offset >= len)
1941f4d26fb3SHerbert Xu 		goto drop_pages;
1942f4d26fb3SHerbert Xu 
1943f4d26fb3SHerbert Xu 	for (; i < nfrags; i++) {
19449e903e08SEric Dumazet 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
194527b437c8SHerbert Xu 
194627b437c8SHerbert Xu 		if (end < len) {
19471da177e4SLinus Torvalds 			offset = end;
194827b437c8SHerbert Xu 			continue;
19491da177e4SLinus Torvalds 		}
19501da177e4SLinus Torvalds 
19519e903e08SEric Dumazet 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
195227b437c8SHerbert Xu 
1953f4d26fb3SHerbert Xu drop_pages:
195427b437c8SHerbert Xu 		skb_shinfo(skb)->nr_frags = i;
195527b437c8SHerbert Xu 
195627b437c8SHerbert Xu 		for (; i < nfrags; i++)
1957ea2ab693SIan Campbell 			skb_frag_unref(skb, i);
195827b437c8SHerbert Xu 
195921dc3301SDavid S. Miller 		if (skb_has_frag_list(skb))
196027b437c8SHerbert Xu 			skb_drop_fraglist(skb);
1961f4d26fb3SHerbert Xu 		goto done;
196227b437c8SHerbert Xu 	}
196327b437c8SHerbert Xu 
196427b437c8SHerbert Xu 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
196527b437c8SHerbert Xu 	     fragp = &frag->next) {
196627b437c8SHerbert Xu 		int end = offset + frag->len;
196727b437c8SHerbert Xu 
196827b437c8SHerbert Xu 		if (skb_shared(frag)) {
196927b437c8SHerbert Xu 			struct sk_buff *nfrag;
197027b437c8SHerbert Xu 
197127b437c8SHerbert Xu 			nfrag = skb_clone(frag, GFP_ATOMIC);
197227b437c8SHerbert Xu 			if (unlikely(!nfrag))
197327b437c8SHerbert Xu 				return -ENOMEM;
197427b437c8SHerbert Xu 
197527b437c8SHerbert Xu 			nfrag->next = frag->next;
197685bb2a60SEric Dumazet 			consume_skb(frag);
197727b437c8SHerbert Xu 			frag = nfrag;
197827b437c8SHerbert Xu 			*fragp = frag;
197927b437c8SHerbert Xu 		}
198027b437c8SHerbert Xu 
198127b437c8SHerbert Xu 		if (end < len) {
198227b437c8SHerbert Xu 			offset = end;
198327b437c8SHerbert Xu 			continue;
198427b437c8SHerbert Xu 		}
198527b437c8SHerbert Xu 
198627b437c8SHerbert Xu 		if (end > len &&
198727b437c8SHerbert Xu 		    unlikely((err = pskb_trim(frag, len - offset))))
198827b437c8SHerbert Xu 			return err;
198927b437c8SHerbert Xu 
199027b437c8SHerbert Xu 		if (frag->next)
199127b437c8SHerbert Xu 			skb_drop_list(&frag->next);
199227b437c8SHerbert Xu 		break;
199327b437c8SHerbert Xu 	}
199427b437c8SHerbert Xu 
1995f4d26fb3SHerbert Xu done:
199627b437c8SHerbert Xu 	if (len > skb_headlen(skb)) {
19971da177e4SLinus Torvalds 		skb->data_len -= skb->len - len;
19981da177e4SLinus Torvalds 		skb->len       = len;
19991da177e4SLinus Torvalds 	} else {
20001da177e4SLinus Torvalds 		skb->len       = len;
20011da177e4SLinus Torvalds 		skb->data_len  = 0;
200227a884dcSArnaldo Carvalho de Melo 		skb_set_tail_pointer(skb, len);
20031da177e4SLinus Torvalds 	}
20041da177e4SLinus Torvalds 
2005c21b48ccSEric Dumazet 	if (!skb->sk || skb->destructor == sock_edemux)
2006c21b48ccSEric Dumazet 		skb_condense(skb);
20071da177e4SLinus Torvalds 	return 0;
20081da177e4SLinus Torvalds }
2009b4ac530fSDavid S. Miller EXPORT_SYMBOL(___pskb_trim);
20101da177e4SLinus Torvalds 
201188078d98SEric Dumazet /* Note: use pskb_trim_rcsum() instead of calling this directly
201288078d98SEric Dumazet  */
201388078d98SEric Dumazet int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
201488078d98SEric Dumazet {
201588078d98SEric Dumazet 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
201688078d98SEric Dumazet 		int delta = skb->len - len;
201788078d98SEric Dumazet 
2018d55bef50SDimitris Michailidis 		skb->csum = csum_block_sub(skb->csum,
2019d55bef50SDimitris Michailidis 					   skb_checksum(skb, len, delta, 0),
2020d55bef50SDimitris Michailidis 					   len);
202154970a2fSVasily Averin 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
202254970a2fSVasily Averin 		int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
202354970a2fSVasily Averin 		int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
202454970a2fSVasily Averin 
202554970a2fSVasily Averin 		if (offset + sizeof(__sum16) > hdlen)
202654970a2fSVasily Averin 			return -EINVAL;
202788078d98SEric Dumazet 	}
202888078d98SEric Dumazet 	return __pskb_trim(skb, len);
202988078d98SEric Dumazet }
203088078d98SEric Dumazet EXPORT_SYMBOL(pskb_trim_rcsum_slow);
203188078d98SEric Dumazet 
20321da177e4SLinus Torvalds /**
20331da177e4SLinus Torvalds  *	__pskb_pull_tail - advance tail of skb header
20341da177e4SLinus Torvalds  *	@skb: buffer to reallocate
20351da177e4SLinus Torvalds  *	@delta: number of bytes to advance tail
20361da177e4SLinus Torvalds  *
20371da177e4SLinus Torvalds  *	The function makes sense only on a fragmented &sk_buff;
20381da177e4SLinus Torvalds  *	it expands the header, moving its tail forward and copying the
20391da177e4SLinus Torvalds  *	necessary data from the fragmented part.
20401da177e4SLinus Torvalds  *
20411da177e4SLinus Torvalds  *	&sk_buff MUST have reference count of 1.
20421da177e4SLinus Torvalds  *
20431da177e4SLinus Torvalds  *	Returns %NULL (and &sk_buff does not change) if pull failed
20441da177e4SLinus Torvalds  *	or value of new tail of skb in the case of success.
20451da177e4SLinus Torvalds  *
20461da177e4SLinus Torvalds  *	All the pointers pointing into skb header may change and must be
20471da177e4SLinus Torvalds  *	reloaded after call to this function.
20481da177e4SLinus Torvalds  */
20491da177e4SLinus Torvalds 
20501da177e4SLinus Torvalds /* Moves tail of skb head forward, copying data from fragmented part,
20511da177e4SLinus Torvalds  * when it is necessary.
20521da177e4SLinus Torvalds  * 1. It may fail due to malloc failure.
20531da177e4SLinus Torvalds  * 2. It may change skb pointers.
20541da177e4SLinus Torvalds  *
20551da177e4SLinus Torvalds  * It is pretty complicated. Luckily, it is called only in exceptional cases.
20561da177e4SLinus Torvalds  */
2057af72868bSJohannes Berg void *__pskb_pull_tail(struct sk_buff *skb, int delta)
20581da177e4SLinus Torvalds {
20591da177e4SLinus Torvalds 	/* If the skb does not have enough free space at the tail, get a new
20601da177e4SLinus Torvalds 	 * one plus 128 bytes for future expansions. If we have enough room
20611da177e4SLinus Torvalds 	 * at the tail, reallocate without expansion only if the skb is cloned.
20621da177e4SLinus Torvalds 	 */
20634305b541SArnaldo Carvalho de Melo 	int i, k, eat = (skb->tail + delta) - skb->end;
20641da177e4SLinus Torvalds 
20651da177e4SLinus Torvalds 	if (eat > 0 || skb_cloned(skb)) {
20661da177e4SLinus Torvalds 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
20671da177e4SLinus Torvalds 				     GFP_ATOMIC))
20681da177e4SLinus Torvalds 			return NULL;
20691da177e4SLinus Torvalds 	}
20701da177e4SLinus Torvalds 
20719f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
20729f77fad3STim Hansen 			     skb_tail_pointer(skb), delta));
20731da177e4SLinus Torvalds 
20741da177e4SLinus Torvalds 	/* Optimization: no fragments, no reason to preestimate the
20751da177e4SLinus Torvalds 	 * size of pulled pages. Superb.
20761da177e4SLinus Torvalds 	 */
207721dc3301SDavid S. Miller 	if (!skb_has_frag_list(skb))
20781da177e4SLinus Torvalds 		goto pull_pages;
20791da177e4SLinus Torvalds 
20801da177e4SLinus Torvalds 	/* Estimate size of pulled pages. */
20811da177e4SLinus Torvalds 	eat = delta;
20821da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
20839e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
20849e903e08SEric Dumazet 
20859e903e08SEric Dumazet 		if (size >= eat)
20861da177e4SLinus Torvalds 			goto pull_pages;
20879e903e08SEric Dumazet 		eat -= size;
20881da177e4SLinus Torvalds 	}
20891da177e4SLinus Torvalds 
20901da177e4SLinus Torvalds 	/* If we need to update the frag list, we are in trouble.
209109001b03SWenhua Shi 	 * Certainly, it is possible to add an offset to the skb data,
20921da177e4SLinus Torvalds 	 * but taking into account that pulling is expected to
20931da177e4SLinus Torvalds 	 * be a very rare operation, it is worth fighting against
20941da177e4SLinus Torvalds 	 * further bloating of the skb head and crucifying ourselves here instead.
20951da177e4SLinus Torvalds 	 * Pure masochism, indeed. 8)8)
20961da177e4SLinus Torvalds 	 */
20971da177e4SLinus Torvalds 	if (eat) {
20981da177e4SLinus Torvalds 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
20991da177e4SLinus Torvalds 		struct sk_buff *clone = NULL;
21001da177e4SLinus Torvalds 		struct sk_buff *insp = NULL;
21011da177e4SLinus Torvalds 
21021da177e4SLinus Torvalds 		do {
21031da177e4SLinus Torvalds 			if (list->len <= eat) {
21041da177e4SLinus Torvalds 				/* Eaten as whole. */
21051da177e4SLinus Torvalds 				eat -= list->len;
21061da177e4SLinus Torvalds 				list = list->next;
21071da177e4SLinus Torvalds 				insp = list;
21081da177e4SLinus Torvalds 			} else {
21091da177e4SLinus Torvalds 				/* Eaten partially. */
21101da177e4SLinus Torvalds 
21111da177e4SLinus Torvalds 				if (skb_shared(list)) {
21121da177e4SLinus Torvalds 					/* Sucks! We need to fork list. :-( */
21131da177e4SLinus Torvalds 					clone = skb_clone(list, GFP_ATOMIC);
21141da177e4SLinus Torvalds 					if (!clone)
21151da177e4SLinus Torvalds 						return NULL;
21161da177e4SLinus Torvalds 					insp = list->next;
21171da177e4SLinus Torvalds 					list = clone;
21181da177e4SLinus Torvalds 				} else {
21191da177e4SLinus Torvalds 					/* This may be pulled without
21201da177e4SLinus Torvalds 					 * problems. */
21211da177e4SLinus Torvalds 					insp = list;
21221da177e4SLinus Torvalds 				}
21231da177e4SLinus Torvalds 				if (!pskb_pull(list, eat)) {
21241da177e4SLinus Torvalds 					kfree_skb(clone);
21251da177e4SLinus Torvalds 					return NULL;
21261da177e4SLinus Torvalds 				}
21271da177e4SLinus Torvalds 				break;
21281da177e4SLinus Torvalds 			}
21291da177e4SLinus Torvalds 		} while (eat);
21301da177e4SLinus Torvalds 
21311da177e4SLinus Torvalds 		/* Free pulled out fragments. */
21321da177e4SLinus Torvalds 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
21331da177e4SLinus Torvalds 			skb_shinfo(skb)->frag_list = list->next;
21341da177e4SLinus Torvalds 			kfree_skb(list);
21351da177e4SLinus Torvalds 		}
21361da177e4SLinus Torvalds 		/* And insert new clone at head. */
21371da177e4SLinus Torvalds 		if (clone) {
21381da177e4SLinus Torvalds 			clone->next = list;
21391da177e4SLinus Torvalds 			skb_shinfo(skb)->frag_list = clone;
21401da177e4SLinus Torvalds 		}
21411da177e4SLinus Torvalds 	}
21421da177e4SLinus Torvalds 	/* Success! Now we may commit changes to skb data. */
21431da177e4SLinus Torvalds 
21441da177e4SLinus Torvalds pull_pages:
21451da177e4SLinus Torvalds 	eat = delta;
21461da177e4SLinus Torvalds 	k = 0;
21471da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
21489e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
21499e903e08SEric Dumazet 
21509e903e08SEric Dumazet 		if (size <= eat) {
2151ea2ab693SIan Campbell 			skb_frag_unref(skb, i);
21529e903e08SEric Dumazet 			eat -= size;
21531da177e4SLinus Torvalds 		} else {
2154b54c9d5bSJonathan Lemon 			skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2155b54c9d5bSJonathan Lemon 
2156b54c9d5bSJonathan Lemon 			*frag = skb_shinfo(skb)->frags[i];
21571da177e4SLinus Torvalds 			if (eat) {
2158b54c9d5bSJonathan Lemon 				skb_frag_off_add(frag, eat);
2159b54c9d5bSJonathan Lemon 				skb_frag_size_sub(frag, eat);
21603ccc6c6fSlinzhang 				if (!i)
21613ccc6c6fSlinzhang 					goto end;
21621da177e4SLinus Torvalds 				eat = 0;
21631da177e4SLinus Torvalds 			}
21641da177e4SLinus Torvalds 			k++;
21651da177e4SLinus Torvalds 		}
21661da177e4SLinus Torvalds 	}
21671da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
21681da177e4SLinus Torvalds 
21693ccc6c6fSlinzhang end:
21701da177e4SLinus Torvalds 	skb->tail     += delta;
21711da177e4SLinus Torvalds 	skb->data_len -= delta;
21721da177e4SLinus Torvalds 
21731f8b977aSWillem de Bruijn 	if (!skb->data_len)
21741f8b977aSWillem de Bruijn 		skb_zcopy_clear(skb, false);
21751f8b977aSWillem de Bruijn 
217627a884dcSArnaldo Carvalho de Melo 	return skb_tail_pointer(skb);
21771da177e4SLinus Torvalds }
2178b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail);
21791da177e4SLinus Torvalds 
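/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): callers normally reach __pskb_pull_tail() indirectly
 * through pskb_may_pull(), which linearizes a header before it is
 * dereferenced. The helper name below is hypothetical.
 */
static inline struct udphdr *example_pull_udp_header(struct sk_buff *skb,
						     int nhoff)
{
	/* May reallocate skb->head; any cached pointers into the old
	 * data area are stale after this call.
	 */
	if (!pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))
		return NULL;
	return (struct udphdr *)(skb->data + nhoff);
}
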
218022019b17SEric Dumazet /**
218122019b17SEric Dumazet  *	skb_copy_bits - copy bits from skb to kernel buffer
218222019b17SEric Dumazet  *	@skb: source skb
218322019b17SEric Dumazet  *	@offset: offset in source
218422019b17SEric Dumazet  *	@to: destination buffer
218522019b17SEric Dumazet  *	@len: number of bytes to copy
218622019b17SEric Dumazet  *
218722019b17SEric Dumazet  *	Copy the specified number of bytes from the source skb to the
218822019b17SEric Dumazet  *	destination buffer.
218922019b17SEric Dumazet  *
219022019b17SEric Dumazet  *	CAUTION:
219122019b17SEric Dumazet  *		If this function's prototype is ever changed,
219222019b17SEric Dumazet  *		check the arch/{*}/net/{*}.S files,
219322019b17SEric Dumazet  *		since it is called from BPF assembly code.
219422019b17SEric Dumazet  */
21951da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
21961da177e4SLinus Torvalds {
21971a028e50SDavid S. Miller 	int start = skb_headlen(skb);
2198fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
2199fbb398a8SDavid S. Miller 	int i, copy;
22001da177e4SLinus Torvalds 
22011da177e4SLinus Torvalds 	if (offset > (int)skb->len - len)
22021da177e4SLinus Torvalds 		goto fault;
22031da177e4SLinus Torvalds 
22041da177e4SLinus Torvalds 	/* Copy header. */
22051a028e50SDavid S. Miller 	if ((copy = start - offset) > 0) {
22061da177e4SLinus Torvalds 		if (copy > len)
22071da177e4SLinus Torvalds 			copy = len;
2208d626f62bSArnaldo Carvalho de Melo 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
22091da177e4SLinus Torvalds 		if ((len -= copy) == 0)
22101da177e4SLinus Torvalds 			return 0;
22111da177e4SLinus Torvalds 		offset += copy;
22121da177e4SLinus Torvalds 		to     += copy;
22131da177e4SLinus Torvalds 	}
22141da177e4SLinus Torvalds 
22151da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
22161a028e50SDavid S. Miller 		int end;
221751c56b00SEric Dumazet 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
22181da177e4SLinus Torvalds 
2219547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
22201a028e50SDavid S. Miller 
222151c56b00SEric Dumazet 		end = start + skb_frag_size(f);
22221da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2223c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2224c613c209SWillem de Bruijn 			struct page *p;
22251da177e4SLinus Torvalds 			u8 *vaddr;
22261da177e4SLinus Torvalds 
22271da177e4SLinus Torvalds 			if (copy > len)
22281da177e4SLinus Torvalds 				copy = len;
22291da177e4SLinus Torvalds 
2230c613c209SWillem de Bruijn 			skb_frag_foreach_page(f,
2231b54c9d5bSJonathan Lemon 					      skb_frag_off(f) + offset - start,
2232c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2233c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2234c613c209SWillem de Bruijn 				memcpy(to + copied, vaddr + p_off, p_len);
223551c56b00SEric Dumazet 				kunmap_atomic(vaddr);
2236c613c209SWillem de Bruijn 			}
22371da177e4SLinus Torvalds 
22381da177e4SLinus Torvalds 			if ((len -= copy) == 0)
22391da177e4SLinus Torvalds 				return 0;
22401da177e4SLinus Torvalds 			offset += copy;
22411da177e4SLinus Torvalds 			to     += copy;
22421da177e4SLinus Torvalds 		}
22431a028e50SDavid S. Miller 		start = end;
22441da177e4SLinus Torvalds 	}
22451da177e4SLinus Torvalds 
2246fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
22471a028e50SDavid S. Miller 		int end;
22481da177e4SLinus Torvalds 
2249547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
22501a028e50SDavid S. Miller 
2251fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
22521da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
22531da177e4SLinus Torvalds 			if (copy > len)
22541da177e4SLinus Torvalds 				copy = len;
2255fbb398a8SDavid S. Miller 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
22561da177e4SLinus Torvalds 				goto fault;
22571da177e4SLinus Torvalds 			if ((len -= copy) == 0)
22581da177e4SLinus Torvalds 				return 0;
22591da177e4SLinus Torvalds 			offset += copy;
22601da177e4SLinus Torvalds 			to     += copy;
22611da177e4SLinus Torvalds 		}
22621a028e50SDavid S. Miller 		start = end;
22631da177e4SLinus Torvalds 	}
2264a6686f2fSShirley Ma 
22651da177e4SLinus Torvalds 	if (!len)
22661da177e4SLinus Torvalds 		return 0;
22671da177e4SLinus Torvalds 
22681da177e4SLinus Torvalds fault:
22691da177e4SLinus Torvalds 	return -EFAULT;
22701da177e4SLinus Torvalds }
2271b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits);
22721da177e4SLinus Torvalds 
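/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): copying the leading bytes of a possibly non-linear
 * skb into a flat buffer. The helper name is hypothetical.
 */
static int example_peek_bytes(const struct sk_buff *skb, void *buf, int len)
{
	/* Returns 0 on success, or -EFAULT if the skb holds fewer
	 * than len bytes.
	 */
	return skb_copy_bits(skb, 0, buf, len);
}
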
22739c55e01cSJens Axboe /*
22749c55e01cSJens Axboe  * Callback from splice_to_pipe(); releases the pages at the end of the
22759c55e01cSJens Axboe  * spd in case we errored out while filling the pipe.
22769c55e01cSJens Axboe  */
22779c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
22789c55e01cSJens Axboe {
22798b9d3728SJarek Poplawski 	put_page(spd->pages[i]);
22808b9d3728SJarek Poplawski }
22819c55e01cSJens Axboe 
2282a108d5f3SDavid S. Miller static struct page *linear_to_page(struct page *page, unsigned int *len,
22834fb66994SJarek Poplawski 				   unsigned int *offset,
228418aafc62SEric Dumazet 				   struct sock *sk)
22858b9d3728SJarek Poplawski {
22865640f768SEric Dumazet 	struct page_frag *pfrag = sk_page_frag(sk);
22878b9d3728SJarek Poplawski 
22885640f768SEric Dumazet 	if (!sk_page_frag_refill(sk, pfrag))
22898b9d3728SJarek Poplawski 		return NULL;
22904fb66994SJarek Poplawski 
22915640f768SEric Dumazet 	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
22924fb66994SJarek Poplawski 
22935640f768SEric Dumazet 	memcpy(page_address(pfrag->page) + pfrag->offset,
22945640f768SEric Dumazet 	       page_address(page) + *offset, *len);
22955640f768SEric Dumazet 	*offset = pfrag->offset;
22965640f768SEric Dumazet 	pfrag->offset += *len;
22974fb66994SJarek Poplawski 
22985640f768SEric Dumazet 	return pfrag->page;
22999c55e01cSJens Axboe }
23009c55e01cSJens Axboe 
230141c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
230241c73a0dSEric Dumazet 			     struct page *page,
230341c73a0dSEric Dumazet 			     unsigned int offset)
230441c73a0dSEric Dumazet {
230541c73a0dSEric Dumazet 	return	spd->nr_pages &&
230641c73a0dSEric Dumazet 		spd->pages[spd->nr_pages - 1] == page &&
230741c73a0dSEric Dumazet 		(spd->partial[spd->nr_pages - 1].offset +
230841c73a0dSEric Dumazet 		 spd->partial[spd->nr_pages - 1].len == offset);
230941c73a0dSEric Dumazet }
231041c73a0dSEric Dumazet 
23119c55e01cSJens Axboe /*
23129c55e01cSJens Axboe  * Fill page/offset/length into spd, if it can hold more pages.
23139c55e01cSJens Axboe  */
2314a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd,
231535f3d14dSJens Axboe 			  struct pipe_inode_info *pipe, struct page *page,
23164fb66994SJarek Poplawski 			  unsigned int *len, unsigned int offset,
231718aafc62SEric Dumazet 			  bool linear,
23187a67e56fSJarek Poplawski 			  struct sock *sk)
23199c55e01cSJens Axboe {
232041c73a0dSEric Dumazet 	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2321a108d5f3SDavid S. Miller 		return true;
23229c55e01cSJens Axboe 
23238b9d3728SJarek Poplawski 	if (linear) {
232418aafc62SEric Dumazet 		page = linear_to_page(page, len, &offset, sk);
23258b9d3728SJarek Poplawski 		if (!page)
2326a108d5f3SDavid S. Miller 			return true;
232741c73a0dSEric Dumazet 	}
232841c73a0dSEric Dumazet 	if (spd_can_coalesce(spd, page, offset)) {
232941c73a0dSEric Dumazet 		spd->partial[spd->nr_pages - 1].len += *len;
2330a108d5f3SDavid S. Miller 		return false;
233141c73a0dSEric Dumazet 	}
23328b9d3728SJarek Poplawski 	get_page(page);
23339c55e01cSJens Axboe 	spd->pages[spd->nr_pages] = page;
23344fb66994SJarek Poplawski 	spd->partial[spd->nr_pages].len = *len;
23359c55e01cSJens Axboe 	spd->partial[spd->nr_pages].offset = offset;
23369c55e01cSJens Axboe 	spd->nr_pages++;
23378b9d3728SJarek Poplawski 
2338a108d5f3SDavid S. Miller 	return false;
23399c55e01cSJens Axboe }
23409c55e01cSJens Axboe 
2341a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff,
23422870c43dSOctavian Purdila 			     unsigned int plen, unsigned int *off,
234318aafc62SEric Dumazet 			     unsigned int *len,
2344d7ccf7c0SEric Dumazet 			     struct splice_pipe_desc *spd, bool linear,
234535f3d14dSJens Axboe 			     struct sock *sk,
234635f3d14dSJens Axboe 			     struct pipe_inode_info *pipe)
23479c55e01cSJens Axboe {
23482870c43dSOctavian Purdila 	if (!*len)
2349a108d5f3SDavid S. Miller 		return true;
23509c55e01cSJens Axboe 
23512870c43dSOctavian Purdila 	/* skip this segment if already processed */
23522870c43dSOctavian Purdila 	if (*off >= plen) {
23532870c43dSOctavian Purdila 		*off -= plen;
2354a108d5f3SDavid S. Miller 		return false;
23552870c43dSOctavian Purdila 	}
23562870c43dSOctavian Purdila 
23572870c43dSOctavian Purdila 	/* ignore any bits we already processed */
23589ca1b22dSEric Dumazet 	poff += *off;
23599ca1b22dSEric Dumazet 	plen -= *off;
23602870c43dSOctavian Purdila 	*off = 0;
23612870c43dSOctavian Purdila 
236218aafc62SEric Dumazet 	do {
236318aafc62SEric Dumazet 		unsigned int flen = min(*len, plen);
23642870c43dSOctavian Purdila 
236518aafc62SEric Dumazet 		if (spd_fill_page(spd, pipe, page, &flen, poff,
236618aafc62SEric Dumazet 				  linear, sk))
2367a108d5f3SDavid S. Miller 			return true;
236818aafc62SEric Dumazet 		poff += flen;
236918aafc62SEric Dumazet 		plen -= flen;
23702870c43dSOctavian Purdila 		*len -= flen;
237118aafc62SEric Dumazet 	} while (*len && plen);
23722870c43dSOctavian Purdila 
2373a108d5f3SDavid S. Miller 	return false;
2374db43a282SOctavian Purdila }
23759c55e01cSJens Axboe 
23769c55e01cSJens Axboe /*
2377a108d5f3SDavid S. Miller  * Map linear and fragment data from the skb to spd. It reports true if the
23782870c43dSOctavian Purdila  * pipe is full or if we already spliced the requested length.
23799c55e01cSJens Axboe  */
2380a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
238135f3d14dSJens Axboe 			      unsigned int *offset, unsigned int *len,
238235f3d14dSJens Axboe 			      struct splice_pipe_desc *spd, struct sock *sk)
23832870c43dSOctavian Purdila {
23842870c43dSOctavian Purdila 	int seg;
2385fa9835e5STom Herbert 	struct sk_buff *iter;
23869c55e01cSJens Axboe 
23871d0c0b32SEric Dumazet 	/* map the linear part:
23882996d31fSAlexander Duyck 	 * If skb->head_frag is set, this 'linear' part is backed by a
23892996d31fSAlexander Duyck 	 * fragment, and if the head is not shared with any clones then
23902996d31fSAlexander Duyck 	 * we can avoid a copy since we own the head portion of this page.
23919c55e01cSJens Axboe 	 */
23922870c43dSOctavian Purdila 	if (__splice_segment(virt_to_page(skb->data),
23932870c43dSOctavian Purdila 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
23942870c43dSOctavian Purdila 			     skb_headlen(skb),
239518aafc62SEric Dumazet 			     offset, len, spd,
23963a7c1ee4SAlexander Duyck 			     skb_head_is_locked(skb),
23971d0c0b32SEric Dumazet 			     sk, pipe))
2398a108d5f3SDavid S. Miller 		return true;
23999c55e01cSJens Axboe 
24009c55e01cSJens Axboe 	/*
24019c55e01cSJens Axboe 	 * then map the fragments
24029c55e01cSJens Axboe 	 */
24039c55e01cSJens Axboe 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
24049c55e01cSJens Axboe 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
24059c55e01cSJens Axboe 
2406ea2ab693SIan Campbell 		if (__splice_segment(skb_frag_page(f),
2407b54c9d5bSJonathan Lemon 				     skb_frag_off(f), skb_frag_size(f),
240818aafc62SEric Dumazet 				     offset, len, spd, false, sk, pipe))
2409a108d5f3SDavid S. Miller 			return true;
24109c55e01cSJens Axboe 	}
24119c55e01cSJens Axboe 
2412fa9835e5STom Herbert 	skb_walk_frags(skb, iter) {
2413fa9835e5STom Herbert 		if (*offset >= iter->len) {
2414fa9835e5STom Herbert 			*offset -= iter->len;
2415fa9835e5STom Herbert 			continue;
2416fa9835e5STom Herbert 		}
2417fa9835e5STom Herbert 		/* __skb_splice_bits() only fails if the output has no room
2418fa9835e5STom Herbert 		 * left, so no point in going over the frag_list for the error
2419fa9835e5STom Herbert 		 * case.
2420fa9835e5STom Herbert 		 */
2421fa9835e5STom Herbert 		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2422fa9835e5STom Herbert 			return true;
2423fa9835e5STom Herbert 	}
2424fa9835e5STom Herbert 
2425a108d5f3SDavid S. Miller 	return false;
24269c55e01cSJens Axboe }
24279c55e01cSJens Axboe 
24289c55e01cSJens Axboe /*
24289c55e01cSJens Axboe  * Map data from the skb to a pipe. Handles the linear part,
2430fa9835e5STom Herbert  * the fragments, and the frag list.
24319c55e01cSJens Axboe  */
2432a60e3cc7SHannes Frederic Sowa int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
24339c55e01cSJens Axboe 		    struct pipe_inode_info *pipe, unsigned int tlen,
243425869262SAl Viro 		    unsigned int flags)
24359c55e01cSJens Axboe {
243641c73a0dSEric Dumazet 	struct partial_page partial[MAX_SKB_FRAGS];
243741c73a0dSEric Dumazet 	struct page *pages[MAX_SKB_FRAGS];
24389c55e01cSJens Axboe 	struct splice_pipe_desc spd = {
24399c55e01cSJens Axboe 		.pages = pages,
24409c55e01cSJens Axboe 		.partial = partial,
2441047fe360SEric Dumazet 		.nr_pages_max = MAX_SKB_FRAGS,
244228a625cbSMiklos Szeredi 		.ops = &nosteal_pipe_buf_ops,
24439c55e01cSJens Axboe 		.spd_release = sock_spd_release,
24449c55e01cSJens Axboe 	};
244535f3d14dSJens Axboe 	int ret = 0;
244635f3d14dSJens Axboe 
2447fa9835e5STom Herbert 	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
24489c55e01cSJens Axboe 
2449a60e3cc7SHannes Frederic Sowa 	if (spd.nr_pages)
245025869262SAl Viro 		ret = splice_to_pipe(pipe, &spd);
24519c55e01cSJens Axboe 
245235f3d14dSJens Axboe 	return ret;
24539c55e01cSJens Axboe }
24542b514574SHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_splice_bits);
24559c55e01cSJens Axboe 
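/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): splicing up to @len payload bytes of one skb into a
 * pipe, roughly as a tcp_splice_read()-style caller would. The helper
 * name is hypothetical.
 */
static int example_splice_skb(struct sock *sk, struct sk_buff *skb,
			      struct pipe_inode_info *pipe, unsigned int len)
{
	/* Returns the number of bytes spliced, 0 if nothing fit, or a
	 * negative errno from splice_to_pipe().
	 */
	return skb_splice_bits(skb, sk, 0, pipe, len, 0);
}
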
245620bf50deSTom Herbert /* Send skb data on a socket. Socket must be locked. */
245720bf50deSTom Herbert int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
245820bf50deSTom Herbert 			 int len)
245920bf50deSTom Herbert {
246020bf50deSTom Herbert 	unsigned int orig_len = len;
246120bf50deSTom Herbert 	struct sk_buff *head = skb;
246220bf50deSTom Herbert 	unsigned short fragidx;
246320bf50deSTom Herbert 	int slen, ret;
246420bf50deSTom Herbert 
246520bf50deSTom Herbert do_frag_list:
246620bf50deSTom Herbert 
246720bf50deSTom Herbert 	/* Deal with head data */
246820bf50deSTom Herbert 	while (offset < skb_headlen(skb) && len) {
246920bf50deSTom Herbert 		struct kvec kv;
247020bf50deSTom Herbert 		struct msghdr msg;
247120bf50deSTom Herbert 
247220bf50deSTom Herbert 		slen = min_t(int, len, skb_headlen(skb) - offset);
247320bf50deSTom Herbert 		kv.iov_base = skb->data + offset;
2474db5980d8SJohn Fastabend 		kv.iov_len = slen;
247520bf50deSTom Herbert 		memset(&msg, 0, sizeof(msg));
2476bd95e678SJohn Fastabend 		msg.msg_flags = MSG_DONTWAIT;
247720bf50deSTom Herbert 
247820bf50deSTom Herbert 		ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
247920bf50deSTom Herbert 		if (ret <= 0)
248020bf50deSTom Herbert 			goto error;
248120bf50deSTom Herbert 
248220bf50deSTom Herbert 		offset += ret;
248320bf50deSTom Herbert 		len -= ret;
248420bf50deSTom Herbert 	}
248520bf50deSTom Herbert 
248620bf50deSTom Herbert 	/* Was all of the data in the skb head? */
248720bf50deSTom Herbert 	if (!len)
248820bf50deSTom Herbert 		goto out;
248920bf50deSTom Herbert 
249020bf50deSTom Herbert 	/* Make offset relative to start of frags */
249120bf50deSTom Herbert 	offset -= skb_headlen(skb);
249220bf50deSTom Herbert 
249320bf50deSTom Herbert 	/* Find where we are in frag list */
249420bf50deSTom Herbert 	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
249520bf50deSTom Herbert 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
249620bf50deSTom Herbert 
2497d8e18a51SMatthew Wilcox (Oracle) 		if (offset < skb_frag_size(frag))
249820bf50deSTom Herbert 			break;
249920bf50deSTom Herbert 
2500d8e18a51SMatthew Wilcox (Oracle) 		offset -= skb_frag_size(frag);
250120bf50deSTom Herbert 	}
250220bf50deSTom Herbert 
250320bf50deSTom Herbert 	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
250420bf50deSTom Herbert 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
250520bf50deSTom Herbert 
2506d8e18a51SMatthew Wilcox (Oracle) 		slen = min_t(size_t, len, skb_frag_size(frag) - offset);
250720bf50deSTom Herbert 
250820bf50deSTom Herbert 		while (slen) {
2509d8e18a51SMatthew Wilcox (Oracle) 			ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
2510b54c9d5bSJonathan Lemon 						     skb_frag_off(frag) + offset,
251120bf50deSTom Herbert 						     slen, MSG_DONTWAIT);
251220bf50deSTom Herbert 			if (ret <= 0)
251320bf50deSTom Herbert 				goto error;
251420bf50deSTom Herbert 
251520bf50deSTom Herbert 			len -= ret;
251620bf50deSTom Herbert 			offset += ret;
251720bf50deSTom Herbert 			slen -= ret;
251820bf50deSTom Herbert 		}
251920bf50deSTom Herbert 
252020bf50deSTom Herbert 		offset = 0;
252120bf50deSTom Herbert 	}
252220bf50deSTom Herbert 
252320bf50deSTom Herbert 	if (len) {
252420bf50deSTom Herbert 		/* Process any frag lists */
252520bf50deSTom Herbert 
252620bf50deSTom Herbert 		if (skb == head) {
252720bf50deSTom Herbert 			if (skb_has_frag_list(skb)) {
252820bf50deSTom Herbert 				skb = skb_shinfo(skb)->frag_list;
252920bf50deSTom Herbert 				goto do_frag_list;
253020bf50deSTom Herbert 			}
253120bf50deSTom Herbert 		} else if (skb->next) {
253220bf50deSTom Herbert 			skb = skb->next;
253320bf50deSTom Herbert 			goto do_frag_list;
253420bf50deSTom Herbert 		}
253520bf50deSTom Herbert 	}
253620bf50deSTom Herbert 
253720bf50deSTom Herbert out:
253820bf50deSTom Herbert 	return orig_len - len;
253920bf50deSTom Herbert 
254020bf50deSTom Herbert error:
254120bf50deSTom Herbert 	return orig_len == len ? ret : orig_len - len;
254220bf50deSTom Herbert }
254320bf50deSTom Herbert EXPORT_SYMBOL_GPL(skb_send_sock_locked);
254420bf50deSTom Herbert 
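/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): the caller owns the socket lock. The helper name is
 * hypothetical.
 */
static int example_send_whole_skb(struct sock *sk, struct sk_buff *skb)
{
	int sent;

	lock_sock(sk);
	/* Returns the number of bytes sent, or a negative errno only if
	 * no progress was made at all.
	 */
	sent = skb_send_sock_locked(sk, skb, 0, skb->len);
	release_sock(sk);
	return sent;
}
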
2545357b40a1SHerbert Xu /**
2546357b40a1SHerbert Xu  *	skb_store_bits - store bits from kernel buffer to skb
2547357b40a1SHerbert Xu  *	@skb: destination buffer
2548357b40a1SHerbert Xu  *	@offset: offset in destination
2549357b40a1SHerbert Xu  *	@from: source buffer
2550357b40a1SHerbert Xu  *	@len: number of bytes to copy
2551357b40a1SHerbert Xu  *
2552357b40a1SHerbert Xu  *	Copy the specified number of bytes from the source buffer to the
2553357b40a1SHerbert Xu  *	destination skb.  This function handles all the messy bits of
2554357b40a1SHerbert Xu  *	traversing fragment lists and such.
2555357b40a1SHerbert Xu  */
2556357b40a1SHerbert Xu 
25570c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2558357b40a1SHerbert Xu {
25591a028e50SDavid S. Miller 	int start = skb_headlen(skb);
2560fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
2561fbb398a8SDavid S. Miller 	int i, copy;
2562357b40a1SHerbert Xu 
2563357b40a1SHerbert Xu 	if (offset > (int)skb->len - len)
2564357b40a1SHerbert Xu 		goto fault;
2565357b40a1SHerbert Xu 
25661a028e50SDavid S. Miller 	if ((copy = start - offset) > 0) {
2567357b40a1SHerbert Xu 		if (copy > len)
2568357b40a1SHerbert Xu 			copy = len;
256927d7ff46SArnaldo Carvalho de Melo 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
2570357b40a1SHerbert Xu 		if ((len -= copy) == 0)
2571357b40a1SHerbert Xu 			return 0;
2572357b40a1SHerbert Xu 		offset += copy;
2573357b40a1SHerbert Xu 		from += copy;
2574357b40a1SHerbert Xu 	}
2575357b40a1SHerbert Xu 
2576357b40a1SHerbert Xu 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2577357b40a1SHerbert Xu 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
25781a028e50SDavid S. Miller 		int end;
2579357b40a1SHerbert Xu 
2580547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
25811a028e50SDavid S. Miller 
25829e903e08SEric Dumazet 		end = start + skb_frag_size(frag);
2583357b40a1SHerbert Xu 		if ((copy = end - offset) > 0) {
2584c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2585c613c209SWillem de Bruijn 			struct page *p;
2586357b40a1SHerbert Xu 			u8 *vaddr;
2587357b40a1SHerbert Xu 
2588357b40a1SHerbert Xu 			if (copy > len)
2589357b40a1SHerbert Xu 				copy = len;
2590357b40a1SHerbert Xu 
2591c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2592b54c9d5bSJonathan Lemon 					      skb_frag_off(frag) + offset - start,
2593c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2594c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2595c613c209SWillem de Bruijn 				memcpy(vaddr + p_off, from + copied, p_len);
259651c56b00SEric Dumazet 				kunmap_atomic(vaddr);
2597c613c209SWillem de Bruijn 			}
2598357b40a1SHerbert Xu 
2599357b40a1SHerbert Xu 			if ((len -= copy) == 0)
2600357b40a1SHerbert Xu 				return 0;
2601357b40a1SHerbert Xu 			offset += copy;
2602357b40a1SHerbert Xu 			from += copy;
2603357b40a1SHerbert Xu 		}
26041a028e50SDavid S. Miller 		start = end;
2605357b40a1SHerbert Xu 	}
2606357b40a1SHerbert Xu 
2607fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
26081a028e50SDavid S. Miller 		int end;
2609357b40a1SHerbert Xu 
2610547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
26111a028e50SDavid S. Miller 
2612fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
2613357b40a1SHerbert Xu 		if ((copy = end - offset) > 0) {
2614357b40a1SHerbert Xu 			if (copy > len)
2615357b40a1SHerbert Xu 				copy = len;
2616fbb398a8SDavid S. Miller 			if (skb_store_bits(frag_iter, offset - start,
26171a028e50SDavid S. Miller 					   from, copy))
2618357b40a1SHerbert Xu 				goto fault;
2619357b40a1SHerbert Xu 			if ((len -= copy) == 0)
2620357b40a1SHerbert Xu 				return 0;
2621357b40a1SHerbert Xu 			offset += copy;
2622357b40a1SHerbert Xu 			from += copy;
2623357b40a1SHerbert Xu 		}
26241a028e50SDavid S. Miller 		start = end;
2625357b40a1SHerbert Xu 	}
2626357b40a1SHerbert Xu 	if (!len)
2627357b40a1SHerbert Xu 		return 0;
2628357b40a1SHerbert Xu 
2629357b40a1SHerbert Xu fault:
2630357b40a1SHerbert Xu 	return -EFAULT;
2631357b40a1SHerbert Xu }
2632357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits);
2633357b40a1SHerbert Xu 
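/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): the inverse of skb_copy_bits() above; the skb must be
 * private (see pskb_expand_head() and friends) before its payload is
 * rewritten. The helper name is hypothetical.
 */
static int example_patch_bytes(struct sk_buff *skb, int offset,
			       const void *src, int len)
{
	/* Returns 0 on success, or -EFAULT if [offset, offset + len)
	 * does not fit inside the skb.
	 */
	return skb_store_bits(skb, offset, src, len);
}
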
26341da177e4SLinus Torvalds /* Checksum skb data. */
26352817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
26362817a336SDaniel Borkmann 		      __wsum csum, const struct skb_checksum_ops *ops)
26371da177e4SLinus Torvalds {
26381a028e50SDavid S. Miller 	int start = skb_headlen(skb);
26391a028e50SDavid S. Miller 	int i, copy = start - offset;
2640fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
26411da177e4SLinus Torvalds 	int pos = 0;
26421da177e4SLinus Torvalds 
26431da177e4SLinus Torvalds 	/* Checksum header. */
26441da177e4SLinus Torvalds 	if (copy > 0) {
26451da177e4SLinus Torvalds 		if (copy > len)
26461da177e4SLinus Torvalds 			copy = len;
26472544af03SMatteo Croce 		csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
26482544af03SMatteo Croce 				       skb->data + offset, copy, csum);
26491da177e4SLinus Torvalds 		if ((len -= copy) == 0)
26501da177e4SLinus Torvalds 			return csum;
26511da177e4SLinus Torvalds 		offset += copy;
26521da177e4SLinus Torvalds 		pos	= copy;
26531da177e4SLinus Torvalds 	}
26541da177e4SLinus Torvalds 
26551da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
26561a028e50SDavid S. Miller 		int end;
265751c56b00SEric Dumazet 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
26581da177e4SLinus Torvalds 
2659547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
26601a028e50SDavid S. Miller 
266151c56b00SEric Dumazet 		end = start + skb_frag_size(frag);
26621da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2663c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2664c613c209SWillem de Bruijn 			struct page *p;
266544bb9363SAl Viro 			__wsum csum2;
26661da177e4SLinus Torvalds 			u8 *vaddr;
26671da177e4SLinus Torvalds 
26681da177e4SLinus Torvalds 			if (copy > len)
26691da177e4SLinus Torvalds 				copy = len;
2670c613c209SWillem de Bruijn 
2671c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2672b54c9d5bSJonathan Lemon 					      skb_frag_off(frag) + offset - start,
2673c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2674c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
26752544af03SMatteo Croce 				csum2 = INDIRECT_CALL_1(ops->update,
26762544af03SMatteo Croce 							csum_partial_ext,
26772544af03SMatteo Croce 							vaddr + p_off, p_len, 0);
267851c56b00SEric Dumazet 				kunmap_atomic(vaddr);
26792544af03SMatteo Croce 				csum = INDIRECT_CALL_1(ops->combine,
26802544af03SMatteo Croce 						       csum_block_add_ext, csum,
26812544af03SMatteo Croce 						       csum2, pos, p_len);
2682c613c209SWillem de Bruijn 				pos += p_len;
2683c613c209SWillem de Bruijn 			}
2684c613c209SWillem de Bruijn 
26851da177e4SLinus Torvalds 			if (!(len -= copy))
26861da177e4SLinus Torvalds 				return csum;
26871da177e4SLinus Torvalds 			offset += copy;
26881da177e4SLinus Torvalds 		}
26891a028e50SDavid S. Miller 		start = end;
26901da177e4SLinus Torvalds 	}
26911da177e4SLinus Torvalds 
2692fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
26931a028e50SDavid S. Miller 		int end;
26941da177e4SLinus Torvalds 
2695547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
26961a028e50SDavid S. Miller 
2697fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
26981da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
26995f92a738SAl Viro 			__wsum csum2;
27001da177e4SLinus Torvalds 			if (copy > len)
27011da177e4SLinus Torvalds 				copy = len;
27022817a336SDaniel Borkmann 			csum2 = __skb_checksum(frag_iter, offset - start,
27032817a336SDaniel Borkmann 					       copy, 0, ops);
27042544af03SMatteo Croce 			csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
27052544af03SMatteo Croce 					       csum, csum2, pos, copy);
27061da177e4SLinus Torvalds 			if ((len -= copy) == 0)
27071da177e4SLinus Torvalds 				return csum;
27081da177e4SLinus Torvalds 			offset += copy;
27091da177e4SLinus Torvalds 			pos    += copy;
27101da177e4SLinus Torvalds 		}
27111a028e50SDavid S. Miller 		start = end;
27121da177e4SLinus Torvalds 	}
271309a62660SKris Katterjohn 	BUG_ON(len);
27141da177e4SLinus Torvalds 
27151da177e4SLinus Torvalds 	return csum;
27161da177e4SLinus Torvalds }
27172817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum);
27182817a336SDaniel Borkmann 
27192817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset,
27202817a336SDaniel Borkmann 		    int len, __wsum csum)
27212817a336SDaniel Borkmann {
27222817a336SDaniel Borkmann 	const struct skb_checksum_ops ops = {
2723cea80ea8SDaniel Borkmann 		.update  = csum_partial_ext,
27242817a336SDaniel Borkmann 		.combine = csum_block_add_ext,
27252817a336SDaniel Borkmann 	};
27262817a336SDaniel Borkmann 
27272817a336SDaniel Borkmann 	return __skb_checksum(skb, offset, len, csum, &ops);
27282817a336SDaniel Borkmann }
2729b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum);
27301da177e4SLinus Torvalds 
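/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): computing and folding a checksum over the whole
 * payload, much as __skb_checksum_complete() below does. The helper name
 * is hypothetical.
 */
static __sum16 example_full_checksum(const struct sk_buff *skb)
{
	/* Fold the 32-bit partial sum down to the 16-bit on-wire form. */
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}
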
27311da177e4SLinus Torvalds /* Both of above in one bottle. */
27321da177e4SLinus Torvalds 
273381d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
27348d5930dfSAl Viro 				    u8 *to, int len)
27351da177e4SLinus Torvalds {
27361a028e50SDavid S. Miller 	int start = skb_headlen(skb);
27371a028e50SDavid S. Miller 	int i, copy = start - offset;
2738fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
27391da177e4SLinus Torvalds 	int pos = 0;
27408d5930dfSAl Viro 	__wsum csum = 0;
27411da177e4SLinus Torvalds 
27421da177e4SLinus Torvalds 	/* Copy header. */
27431da177e4SLinus Torvalds 	if (copy > 0) {
27441da177e4SLinus Torvalds 		if (copy > len)
27451da177e4SLinus Torvalds 			copy = len;
27461da177e4SLinus Torvalds 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
2747cc44c17bSAl Viro 						 copy);
27481da177e4SLinus Torvalds 		if ((len -= copy) == 0)
27491da177e4SLinus Torvalds 			return csum;
27501da177e4SLinus Torvalds 		offset += copy;
27511da177e4SLinus Torvalds 		to     += copy;
27521da177e4SLinus Torvalds 		pos	= copy;
27531da177e4SLinus Torvalds 	}
27541da177e4SLinus Torvalds 
27551da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
27561a028e50SDavid S. Miller 		int end;
27571da177e4SLinus Torvalds 
2758547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
27591a028e50SDavid S. Miller 
27609e903e08SEric Dumazet 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
27611da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2762c613c209SWillem de Bruijn 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2763c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2764c613c209SWillem de Bruijn 			struct page *p;
27655084205fSAl Viro 			__wsum csum2;
27661da177e4SLinus Torvalds 			u8 *vaddr;
27671da177e4SLinus Torvalds 
27681da177e4SLinus Torvalds 			if (copy > len)
27691da177e4SLinus Torvalds 				copy = len;
2770c613c209SWillem de Bruijn 
2771c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2772b54c9d5bSJonathan Lemon 					      skb_frag_off(frag) + offset - start,
2773c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2774c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2775c613c209SWillem de Bruijn 				csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2776c613c209SWillem de Bruijn 								  to + copied,
2777cc44c17bSAl Viro 								  p_len);
277851c56b00SEric Dumazet 				kunmap_atomic(vaddr);
27791da177e4SLinus Torvalds 				csum = csum_block_add(csum, csum2, pos);
2780c613c209SWillem de Bruijn 				pos += p_len;
2781c613c209SWillem de Bruijn 			}
2782c613c209SWillem de Bruijn 
27831da177e4SLinus Torvalds 			if (!(len -= copy))
27841da177e4SLinus Torvalds 				return csum;
27851da177e4SLinus Torvalds 			offset += copy;
27861da177e4SLinus Torvalds 			to     += copy;
27871da177e4SLinus Torvalds 		}
27881a028e50SDavid S. Miller 		start = end;
27891da177e4SLinus Torvalds 	}
27901da177e4SLinus Torvalds 
2791fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
279281d77662SAl Viro 		__wsum csum2;
27931a028e50SDavid S. Miller 		int end;
27941da177e4SLinus Torvalds 
2795547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
27961a028e50SDavid S. Miller 
2797fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
27981da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
27991da177e4SLinus Torvalds 			if (copy > len)
28001da177e4SLinus Torvalds 				copy = len;
2801fbb398a8SDavid S. Miller 			csum2 = skb_copy_and_csum_bits(frag_iter,
28021a028e50SDavid S. Miller 						       offset - start,
28038d5930dfSAl Viro 						       to, copy);
28041da177e4SLinus Torvalds 			csum = csum_block_add(csum, csum2, pos);
28051da177e4SLinus Torvalds 			if ((len -= copy) == 0)
28061da177e4SLinus Torvalds 				return csum;
28071da177e4SLinus Torvalds 			offset += copy;
28081da177e4SLinus Torvalds 			to     += copy;
28091da177e4SLinus Torvalds 			pos    += copy;
28101da177e4SLinus Torvalds 		}
28111a028e50SDavid S. Miller 		start = end;
28121da177e4SLinus Torvalds 	}
281309a62660SKris Katterjohn 	BUG_ON(len);
28141da177e4SLinus Torvalds 	return csum;
28151da177e4SLinus Torvalds }
2816b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_bits);
28171da177e4SLinus Torvalds 
281849f8e832SCong Wang __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
281949f8e832SCong Wang {
282049f8e832SCong Wang 	__sum16 sum;
282149f8e832SCong Wang 
282249f8e832SCong Wang 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
282314641931SCong Wang 	/* See comments in __skb_checksum_complete(). */
282449f8e832SCong Wang 	if (likely(!sum)) {
282549f8e832SCong Wang 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
282649f8e832SCong Wang 		    !skb->csum_complete_sw)
28277fe50ac8SCong Wang 			netdev_rx_csum_fault(skb->dev, skb);
282849f8e832SCong Wang 	}
282949f8e832SCong Wang 	if (!skb_shared(skb))
283049f8e832SCong Wang 		skb->csum_valid = !sum;
283149f8e832SCong Wang 	return sum;
283249f8e832SCong Wang }
283349f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete_head);
283449f8e832SCong Wang 
283514641931SCong Wang /* This function assumes skb->csum already holds pseudo header's checksum,
283614641931SCong Wang  * which has been changed from the hardware checksum, for example, by
283714641931SCong Wang  * __skb_checksum_validate_complete(). And, the original skb->csum must
283814641931SCong Wang  * have been validated unsuccessfully for the CHECKSUM_COMPLETE case.
283914641931SCong Wang  *
284014641931SCong Wang  * It returns non-zero if the recomputed checksum is still invalid, otherwise
284114641931SCong Wang  * zero. The new checksum is stored back into skb->csum unless the skb is
284214641931SCong Wang  * shared.
284314641931SCong Wang  */
284449f8e832SCong Wang __sum16 __skb_checksum_complete(struct sk_buff *skb)
284549f8e832SCong Wang {
284649f8e832SCong Wang 	__wsum csum;
284749f8e832SCong Wang 	__sum16 sum;
284849f8e832SCong Wang 
284949f8e832SCong Wang 	csum = skb_checksum(skb, 0, skb->len, 0);
285049f8e832SCong Wang 
285149f8e832SCong Wang 	sum = csum_fold(csum_add(skb->csum, csum));
285214641931SCong Wang 	/* This check is inverted, because we already knew the hardware
285314641931SCong Wang 	 * checksum is invalid before calling this function. So, if the
285414641931SCong Wang 	 * re-computed checksum is valid instead, then we have a mismatch
285514641931SCong Wang 	 * between the original skb->csum and skb_checksum(). This means either
285614641931SCong Wang 	 * the original hardware checksum is incorrect or we screw up skb->csum
285714641931SCong Wang 	 * when moving skb->data around.
285814641931SCong Wang 	 */
285949f8e832SCong Wang 	if (likely(!sum)) {
286049f8e832SCong Wang 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
286149f8e832SCong Wang 		    !skb->csum_complete_sw)
28627fe50ac8SCong Wang 			netdev_rx_csum_fault(skb->dev, skb);
286349f8e832SCong Wang 	}
286449f8e832SCong Wang 
286549f8e832SCong Wang 	if (!skb_shared(skb)) {
286649f8e832SCong Wang 		/* Save full packet checksum */
286749f8e832SCong Wang 		skb->csum = csum;
286849f8e832SCong Wang 		skb->ip_summed = CHECKSUM_COMPLETE;
286949f8e832SCong Wang 		skb->csum_complete_sw = 1;
287049f8e832SCong Wang 		skb->csum_valid = !sum;
287149f8e832SCong Wang 	}
287249f8e832SCong Wang 
287349f8e832SCong Wang 	return sum;
287449f8e832SCong Wang }
287549f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete);
287649f8e832SCong Wang 
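/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): assuming skb->csum already holds the pseudo-header
 * checksum as described above, a zero folded result means the packet
 * validates. The helper name is hypothetical.
 */
static bool example_checksum_ok(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb) == 0;
}
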
28779617813dSDavide Caratti static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
28789617813dSDavide Caratti {
28799617813dSDavide Caratti 	net_warn_ratelimited(
28809617813dSDavide Caratti 		"%s: attempt to compute crc32c without libcrc32c.ko\n",
28819617813dSDavide Caratti 		__func__);
28829617813dSDavide Caratti 	return 0;
28839617813dSDavide Caratti }
28849617813dSDavide Caratti 
28859617813dSDavide Caratti static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
28869617813dSDavide Caratti 				       int offset, int len)
28879617813dSDavide Caratti {
28889617813dSDavide Caratti 	net_warn_ratelimited(
28899617813dSDavide Caratti 		"%s: attempt to compute crc32c without libcrc32c.ko\n",
28909617813dSDavide Caratti 		__func__);
28919617813dSDavide Caratti 	return 0;
28929617813dSDavide Caratti }
28939617813dSDavide Caratti 
28949617813dSDavide Caratti static const struct skb_checksum_ops default_crc32c_ops = {
28959617813dSDavide Caratti 	.update  = warn_crc32c_csum_update,
28969617813dSDavide Caratti 	.combine = warn_crc32c_csum_combine,
28979617813dSDavide Caratti };
28989617813dSDavide Caratti 
28999617813dSDavide Caratti const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
29009617813dSDavide Caratti 	&default_crc32c_ops;
29019617813dSDavide Caratti EXPORT_SYMBOL(crc32c_csum_stub);
29029617813dSDavide Caratti 
2903af2806f8SThomas Graf /**
2904af2806f8SThomas Graf  *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2905af2806f8SThomas Graf  *	@from: source buffer
2906af2806f8SThomas Graf  *
2907af2806f8SThomas Graf  *	Calculates the amount of linear headroom needed in the 'to' skb passed
2908af2806f8SThomas Graf  *	into skb_zerocopy().
2909af2806f8SThomas Graf  */
2910af2806f8SThomas Graf unsigned int
2911af2806f8SThomas Graf skb_zerocopy_headlen(const struct sk_buff *from)
2912af2806f8SThomas Graf {
2913af2806f8SThomas Graf 	unsigned int hlen = 0;
2914af2806f8SThomas Graf 
2915af2806f8SThomas Graf 	if (!from->head_frag ||
2916af2806f8SThomas Graf 	    skb_headlen(from) < L1_CACHE_BYTES ||
2917af2806f8SThomas Graf 	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2918af2806f8SThomas Graf 		hlen = skb_headlen(from);
2919af2806f8SThomas Graf 
2920af2806f8SThomas Graf 	if (skb_has_frag_list(from))
2921af2806f8SThomas Graf 		hlen = from->len;
2922af2806f8SThomas Graf 
2923af2806f8SThomas Graf 	return hlen;
2924af2806f8SThomas Graf }
2925af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2926af2806f8SThomas Graf 
2927af2806f8SThomas Graf /**
2928af2806f8SThomas Graf  *	skb_zerocopy - Zero copy skb to skb
2929af2806f8SThomas Graf  *	@to: destination buffer
29307fceb4deSMasanari Iida  *	@from: source buffer
2931af2806f8SThomas Graf  *	@len: number of bytes to copy from source buffer
2932af2806f8SThomas Graf  *	@hlen: size of linear headroom in destination buffer
2933af2806f8SThomas Graf  *
2934af2806f8SThomas Graf  *	Copies up to @len bytes from @from to @to by creating references
2935af2806f8SThomas Graf  *	to the frags in the source buffer.
2936af2806f8SThomas Graf  *
2937af2806f8SThomas Graf  *	The @hlen as calculated by skb_zerocopy_headlen() specifies the
2938af2806f8SThomas Graf  *	headroom in the @to buffer.
293936d5fe6aSZoltan Kiss  *
294036d5fe6aSZoltan Kiss  *	Return value:
294136d5fe6aSZoltan Kiss  *	0: everything is OK
294236d5fe6aSZoltan Kiss  *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
294336d5fe6aSZoltan Kiss  *	-EFAULT: skb_copy_bits() found some problem with skb geometry
2944af2806f8SThomas Graf  */
294536d5fe6aSZoltan Kiss int
294636d5fe6aSZoltan Kiss skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2947af2806f8SThomas Graf {
2948af2806f8SThomas Graf 	int i, j = 0;
2949af2806f8SThomas Graf 	int plen = 0; /* length of skb->head fragment */
295036d5fe6aSZoltan Kiss 	int ret;
2951af2806f8SThomas Graf 	struct page *page;
2952af2806f8SThomas Graf 	unsigned int offset;
2953af2806f8SThomas Graf 
2954af2806f8SThomas Graf 	BUG_ON(!from->head_frag && !hlen);
2955af2806f8SThomas Graf 
2956af2806f8SThomas Graf 	/* don't bother with small payloads */
295736d5fe6aSZoltan Kiss 	if (len <= skb_tailroom(to))
295836d5fe6aSZoltan Kiss 		return skb_copy_bits(from, 0, skb_put(to, len), len);
2959af2806f8SThomas Graf 
2960af2806f8SThomas Graf 	if (hlen) {
296136d5fe6aSZoltan Kiss 		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
296236d5fe6aSZoltan Kiss 		if (unlikely(ret))
296336d5fe6aSZoltan Kiss 			return ret;
2964af2806f8SThomas Graf 		len -= hlen;
2965af2806f8SThomas Graf 	} else {
2966af2806f8SThomas Graf 		plen = min_t(int, skb_headlen(from), len);
2967af2806f8SThomas Graf 		if (plen) {
2968af2806f8SThomas Graf 			page = virt_to_head_page(from->head);
2969af2806f8SThomas Graf 			offset = from->data - (unsigned char *)page_address(page);
2970af2806f8SThomas Graf 			__skb_fill_page_desc(to, 0, page, offset, plen);
2971af2806f8SThomas Graf 			get_page(page);
2972af2806f8SThomas Graf 			j = 1;
2973af2806f8SThomas Graf 			len -= plen;
2974af2806f8SThomas Graf 		}
2975af2806f8SThomas Graf 	}
2976af2806f8SThomas Graf 
2977af2806f8SThomas Graf 	to->truesize += len + plen;
2978af2806f8SThomas Graf 	to->len += len + plen;
2979af2806f8SThomas Graf 	to->data_len += len + plen;
2980af2806f8SThomas Graf 
298136d5fe6aSZoltan Kiss 	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
298236d5fe6aSZoltan Kiss 		skb_tx_error(from);
298336d5fe6aSZoltan Kiss 		return -ENOMEM;
298436d5fe6aSZoltan Kiss 	}
29851f8b977aSWillem de Bruijn 	skb_zerocopy_clone(to, from, GFP_ATOMIC);
298636d5fe6aSZoltan Kiss 
2987af2806f8SThomas Graf 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2988d8e18a51SMatthew Wilcox (Oracle) 		int size;
2989d8e18a51SMatthew Wilcox (Oracle) 
2990af2806f8SThomas Graf 		if (!len)
2991af2806f8SThomas Graf 			break;
2992af2806f8SThomas Graf 		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2993d8e18a51SMatthew Wilcox (Oracle) 		size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
2994d8e18a51SMatthew Wilcox (Oracle) 					len);
2995d8e18a51SMatthew Wilcox (Oracle) 		skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
2996d8e18a51SMatthew Wilcox (Oracle) 		len -= size;
2997af2806f8SThomas Graf 		skb_frag_ref(to, j);
2998af2806f8SThomas Graf 		j++;
2999af2806f8SThomas Graf 	}
3000af2806f8SThomas Graf 	skb_shinfo(to)->nr_frags = j;
300136d5fe6aSZoltan Kiss 
300236d5fe6aSZoltan Kiss 	return 0;
3003af2806f8SThomas Graf }
3004af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy);
3005af2806f8SThomas Graf 
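/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): pairing skb_zerocopy_headlen() with skb_zerocopy(),
 * assuming @to was allocated with at least that much linear room. The
 * helper name is hypothetical.
 */
static int example_zerocopy(struct sk_buff *to, struct sk_buff *from, int len)
{
	unsigned int hlen = skb_zerocopy_headlen(from);

	/* Returns 0, -ENOMEM or -EFAULT as documented above. */
	return skb_zerocopy(to, from, len, hlen);
}
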
30061da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
30071da177e4SLinus Torvalds {
3008d3bc23e7SAl Viro 	__wsum csum;
30091da177e4SLinus Torvalds 	long csstart;
30101da177e4SLinus Torvalds 
301184fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL)
301255508d60SMichał Mirosław 		csstart = skb_checksum_start_offset(skb);
30131da177e4SLinus Torvalds 	else
30141da177e4SLinus Torvalds 		csstart = skb_headlen(skb);
30151da177e4SLinus Torvalds 
301609a62660SKris Katterjohn 	BUG_ON(csstart > skb_headlen(skb));
30171da177e4SLinus Torvalds 
3018d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data(skb, to, csstart);
30191da177e4SLinus Torvalds 
30201da177e4SLinus Torvalds 	csum = 0;
30211da177e4SLinus Torvalds 	if (csstart != skb->len)
30221da177e4SLinus Torvalds 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
30238d5930dfSAl Viro 					      skb->len - csstart);
30241da177e4SLinus Torvalds 
302584fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
3026ff1dcadbSAl Viro 		long csstuff = csstart + skb->csum_offset;
30271da177e4SLinus Torvalds 
3028d3bc23e7SAl Viro 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
30291da177e4SLinus Torvalds 	}
30301da177e4SLinus Torvalds }
3031b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev);
30321da177e4SLinus Torvalds 
30331da177e4SLinus Torvalds /**
30341da177e4SLinus Torvalds  *	skb_dequeue - remove from the head of the queue
30351da177e4SLinus Torvalds  *	@list: list to dequeue from
30361da177e4SLinus Torvalds  *
30371da177e4SLinus Torvalds  *	Remove the head of the list. The list lock is taken so the function
30381da177e4SLinus Torvalds  *	may be used safely with other locking list functions. The head item is
30391da177e4SLinus Torvalds  *	returned or %NULL if the list is empty.
30401da177e4SLinus Torvalds  */
30411da177e4SLinus Torvalds 
30421da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list)
30431da177e4SLinus Torvalds {
30441da177e4SLinus Torvalds 	unsigned long flags;
30451da177e4SLinus Torvalds 	struct sk_buff *result;
30461da177e4SLinus Torvalds 
30471da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
30481da177e4SLinus Torvalds 	result = __skb_dequeue(list);
30491da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
30501da177e4SLinus Torvalds 	return result;
30511da177e4SLinus Torvalds }
3052b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue);
30531da177e4SLinus Torvalds 
30541da177e4SLinus Torvalds /**
30551da177e4SLinus Torvalds  *	skb_dequeue_tail - remove from the tail of the queue
30561da177e4SLinus Torvalds  *	@list: list to dequeue from
30571da177e4SLinus Torvalds  *
30581da177e4SLinus Torvalds  *	Remove the tail of the list. The list lock is taken so the function
30591da177e4SLinus Torvalds  *	may be used safely with other locking list functions. The tail item is
30601da177e4SLinus Torvalds  *	returned or %NULL if the list is empty.
30611da177e4SLinus Torvalds  */
30621da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
30631da177e4SLinus Torvalds {
30641da177e4SLinus Torvalds 	unsigned long flags;
30651da177e4SLinus Torvalds 	struct sk_buff *result;
30661da177e4SLinus Torvalds 
30671da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
30681da177e4SLinus Torvalds 	result = __skb_dequeue_tail(list);
30691da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
30701da177e4SLinus Torvalds 	return result;
30711da177e4SLinus Torvalds }
3072b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail);
30731da177e4SLinus Torvalds 
30741da177e4SLinus Torvalds /**
30751da177e4SLinus Torvalds  *	skb_queue_purge - empty a list
30761da177e4SLinus Torvalds  *	@list: list to empty
30771da177e4SLinus Torvalds  *
30781da177e4SLinus Torvalds  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
30791da177e4SLinus Torvalds  *	the list and one reference dropped. This function takes the list
30801da177e4SLinus Torvalds  *	lock and is atomic with respect to other list locking functions.
30811da177e4SLinus Torvalds  */
30821da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list)
30831da177e4SLinus Torvalds {
30841da177e4SLinus Torvalds 	struct sk_buff *skb;
30851da177e4SLinus Torvalds 	while ((skb = skb_dequeue(list)) != NULL)
30861da177e4SLinus Torvalds 		kfree_skb(skb);
30871da177e4SLinus Torvalds }
3088b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge);
30891da177e4SLinus Torvalds 
30901da177e4SLinus Torvalds /**
30919f5afeaeSYaogong Wang  *	skb_rbtree_purge - empty a skb rbtree
30929f5afeaeSYaogong Wang  *	@root: root of the rbtree to empty
3093385114deSPeter Oskolkov  *	Return value: the sum of truesizes of all purged skbs.
30949f5afeaeSYaogong Wang  *
30959f5afeaeSYaogong Wang  *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
30969f5afeaeSYaogong Wang  *	the list and one reference dropped. This function does not take
30979f5afeaeSYaogong Wang  *	any lock. Synchronization should be handled by the caller (e.g., TCP
30989f5afeaeSYaogong Wang  *	out-of-order queue is protected by the socket lock).
30999f5afeaeSYaogong Wang  */
3100385114deSPeter Oskolkov unsigned int skb_rbtree_purge(struct rb_root *root)
31019f5afeaeSYaogong Wang {
31027c90584cSEric Dumazet 	struct rb_node *p = rb_first(root);
3103385114deSPeter Oskolkov 	unsigned int sum = 0;
31049f5afeaeSYaogong Wang 
31057c90584cSEric Dumazet 	while (p) {
31067c90584cSEric Dumazet 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
31077c90584cSEric Dumazet 
31087c90584cSEric Dumazet 		p = rb_next(p);
31097c90584cSEric Dumazet 		rb_erase(&skb->rbnode, root);
3110385114deSPeter Oskolkov 		sum += skb->truesize;
31119f5afeaeSYaogong Wang 		kfree_skb(skb);
31127c90584cSEric Dumazet 	}
3113385114deSPeter Oskolkov 	return sum;
31149f5afeaeSYaogong Wang }
31159f5afeaeSYaogong Wang 
31169f5afeaeSYaogong Wang /**
31171da177e4SLinus Torvalds  *	skb_queue_head - queue a buffer at the list head
31181da177e4SLinus Torvalds  *	@list: list to use
31191da177e4SLinus Torvalds  *	@newsk: buffer to queue
31201da177e4SLinus Torvalds  *
31211da177e4SLinus Torvalds  *	Queue a buffer at the start of the list. This function takes the
31221da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff
31231da177e4SLinus Torvalds  *	functions.
31241da177e4SLinus Torvalds  *
31251da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
31261da177e4SLinus Torvalds  */
31271da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
31281da177e4SLinus Torvalds {
31291da177e4SLinus Torvalds 	unsigned long flags;
31301da177e4SLinus Torvalds 
31311da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
31321da177e4SLinus Torvalds 	__skb_queue_head(list, newsk);
31331da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
31341da177e4SLinus Torvalds }
3135b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head);
31361da177e4SLinus Torvalds 
31371da177e4SLinus Torvalds /**
31381da177e4SLinus Torvalds  *	skb_queue_tail - queue a buffer at the list tail
31391da177e4SLinus Torvalds  *	@list: list to use
31401da177e4SLinus Torvalds  *	@newsk: buffer to queue
31411da177e4SLinus Torvalds  *
31421da177e4SLinus Torvalds  *	Queue a buffer at the tail of the list. This function takes the
31431da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff
31441da177e4SLinus Torvalds  *	functions.
31451da177e4SLinus Torvalds  *
31461da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
31471da177e4SLinus Torvalds  */
31481da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
31491da177e4SLinus Torvalds {
31501da177e4SLinus Torvalds 	unsigned long flags;
31511da177e4SLinus Torvalds 
31521da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
31531da177e4SLinus Torvalds 	__skb_queue_tail(list, newsk);
31541da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
31551da177e4SLinus Torvalds }
3156b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail);
31578728b834SDavid S. Miller 
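/* Illustrative usage sketch (an editorial addition, not part of the
 * original source): a minimal producer/consumer round trip over a private
 * queue. The helper name is hypothetical.
 */
static void example_queue_roundtrip(struct sk_buff *skb)
{
	struct sk_buff_head q;

	skb_queue_head_init(&q);
	skb_queue_tail(&q, skb);	/* FIFO enqueue at the tail */
	kfree_skb(skb_dequeue(&q));	/* FIFO dequeue from the head */
}
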
31581da177e4SLinus Torvalds /**
31591da177e4SLinus Torvalds  *	skb_unlink	-	remove a buffer from a list
31601da177e4SLinus Torvalds  *	@skb: buffer to remove
31618728b834SDavid S. Miller  *	@list: list to use
31621da177e4SLinus Torvalds  *
31638728b834SDavid S. Miller  *	Remove a packet from a list. The list locks are taken and this
31648728b834SDavid S. Miller  *	function is atomic with respect to other locked list calls.
31651da177e4SLinus Torvalds  *
31668728b834SDavid S. Miller  *	You must know what list the SKB is on.
31671da177e4SLinus Torvalds  */
31688728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
31691da177e4SLinus Torvalds {
31701da177e4SLinus Torvalds 	unsigned long flags;
31711da177e4SLinus Torvalds 
31721da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
31738728b834SDavid S. Miller 	__skb_unlink(skb, list);
31741da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
31751da177e4SLinus Torvalds }
3176b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink);
31771da177e4SLinus Torvalds 
31781da177e4SLinus Torvalds /**
31791da177e4SLinus Torvalds  *	skb_append	-	append a buffer
31801da177e4SLinus Torvalds  *	@old: buffer to insert after
31811da177e4SLinus Torvalds  *	@newsk: buffer to insert
31828728b834SDavid S. Miller  *	@list: list to use
31831da177e4SLinus Torvalds  *
31841da177e4SLinus Torvalds  *	Place a packet after a given packet in a list. The list locks are taken
31851da177e4SLinus Torvalds  *	and this function is atomic with respect to other list locked calls.
31861da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
31871da177e4SLinus Torvalds  */
31888728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
31891da177e4SLinus Torvalds {
31901da177e4SLinus Torvalds 	unsigned long flags;
31911da177e4SLinus Torvalds 
31928728b834SDavid S. Miller 	spin_lock_irqsave(&list->lock, flags);
31937de6c033SGerrit Renker 	__skb_queue_after(list, old, newsk);
31948728b834SDavid S. Miller 	spin_unlock_irqrestore(&list->lock, flags);
31951da177e4SLinus Torvalds }
3196b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append);
31971da177e4SLinus Torvalds 
31981da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb,
31991da177e4SLinus Torvalds 					   struct sk_buff* skb1,
32001da177e4SLinus Torvalds 					   const u32 len, const int pos)
32011da177e4SLinus Torvalds {
32021da177e4SLinus Torvalds 	int i;
32031da177e4SLinus Torvalds 
3204d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3205d626f62bSArnaldo Carvalho de Melo 					 pos - len);
32061da177e4SLinus Torvalds 	/* And move data appendix as is. */
32071da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
32081da177e4SLinus Torvalds 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
32091da177e4SLinus Torvalds 
32101da177e4SLinus Torvalds 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
32111da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags  = 0;
32121da177e4SLinus Torvalds 	skb1->data_len		   = skb->data_len;
32131da177e4SLinus Torvalds 	skb1->len		   += skb1->data_len;
32141da177e4SLinus Torvalds 	skb->data_len		   = 0;
32151da177e4SLinus Torvalds 	skb->len		   = len;
321627a884dcSArnaldo Carvalho de Melo 	skb_set_tail_pointer(skb, len);
32171da177e4SLinus Torvalds }
32181da177e4SLinus Torvalds 
32191da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb,
32201da177e4SLinus Torvalds 				       struct sk_buff* skb1,
32211da177e4SLinus Torvalds 				       const u32 len, int pos)
32221da177e4SLinus Torvalds {
32231da177e4SLinus Torvalds 	int i, k = 0;
32241da177e4SLinus Torvalds 	const int nfrags = skb_shinfo(skb)->nr_frags;
32251da177e4SLinus Torvalds 
32261da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = 0;
32271da177e4SLinus Torvalds 	skb1->len		  = skb1->data_len = skb->len - len;
32281da177e4SLinus Torvalds 	skb->len		  = len;
32291da177e4SLinus Torvalds 	skb->data_len		  = len - pos;
32301da177e4SLinus Torvalds 
32311da177e4SLinus Torvalds 	for (i = 0; i < nfrags; i++) {
32329e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
32331da177e4SLinus Torvalds 
32341da177e4SLinus Torvalds 		if (pos + size > len) {
32351da177e4SLinus Torvalds 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
32361da177e4SLinus Torvalds 
32371da177e4SLinus Torvalds 			if (pos < len) {
32381da177e4SLinus Torvalds 				/* Split frag.
32391da177e4SLinus Torvalds 				 * We have two variants in this case:
32401da177e4SLinus Torvalds 				 * 1. Move the whole frag to the second
32411da177e4SLinus Torvalds 				 *    part, if it is possible. E.g. this
32421da177e4SLinus Torvalds 				 *    approach is mandatory for TUX,
32431da177e4SLinus Torvalds 				 *    where splitting is expensive.
32441da177e4SLinus Torvalds 				 * 2. Split the frag exactly at len. This is what we do here.
32451da177e4SLinus Torvalds 				 */
3246ea2ab693SIan Campbell 				skb_frag_ref(skb, i);
3247b54c9d5bSJonathan Lemon 				skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
32489e903e08SEric Dumazet 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
32499e903e08SEric Dumazet 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
32501da177e4SLinus Torvalds 				skb_shinfo(skb)->nr_frags++;
32511da177e4SLinus Torvalds 			}
32521da177e4SLinus Torvalds 			k++;
32531da177e4SLinus Torvalds 		} else
32541da177e4SLinus Torvalds 			skb_shinfo(skb)->nr_frags++;
32551da177e4SLinus Torvalds 		pos += size;
32561da177e4SLinus Torvalds 	}
32571da177e4SLinus Torvalds 	skb_shinfo(skb1)->nr_frags = k;
32581da177e4SLinus Torvalds }
32591da177e4SLinus Torvalds 
32601da177e4SLinus Torvalds /**
32611da177e4SLinus Torvalds  * skb_split - Split a fragmented skb into two parts at length len.
32621da177e4SLinus Torvalds  * @skb: the buffer to split
32631da177e4SLinus Torvalds  * @skb1: the buffer to receive the second part
32641da177e4SLinus Torvalds  * @len: new length for skb
32651da177e4SLinus Torvalds  */
32661da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
32671da177e4SLinus Torvalds {
32681da177e4SLinus Torvalds 	int pos = skb_headlen(skb);
32691da177e4SLinus Torvalds 
3270fff88030SWillem de Bruijn 	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3271fff88030SWillem de Bruijn 				      SKBTX_SHARED_FRAG;
32721f8b977aSWillem de Bruijn 	skb_zerocopy_clone(skb1, skb, 0);
32731da177e4SLinus Torvalds 	if (len < pos)	/* Split line is inside header. */
32741da177e4SLinus Torvalds 		skb_split_inside_header(skb, skb1, len, pos);
32751da177e4SLinus Torvalds 	else		/* Second chunk has no header, nothing to copy. */
32761da177e4SLinus Torvalds 		skb_split_no_header(skb, skb1, len, pos);
32771da177e4SLinus Torvalds }
3278b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split);
32791da177e4SLinus Torvalds 
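/* Illustrative sketch of a typical caller (in the spirit of the TCP
 * fragmenting path, not a copy of it): keep the first @mss bytes in @skb and
 * move the remainder into a freshly allocated buffer.  Queueing or freeing
 * the new buffer is left to the caller.
 */
static struct sk_buff *split_off_tail(struct sk_buff *skb, u32 mss, gfp_t gfp)
{
	struct sk_buff *skb1;

	if (skb->len <= mss)
		return NULL;

	/* enough linear room in case the split point lies inside the header */
	skb1 = alloc_skb(skb_headlen(skb), gfp);
	if (!skb1)
		return NULL;

	skb_split(skb, skb1, mss);	/* skb keeps bytes [0, mss) */
	return skb1;			/* skb1 now holds the rest */
}
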
32809f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go.
32819f782db3SIlpo Järvinen  *
32829f782db3SIlpo Järvinen  * Caller cannot keep skb_shinfo related pointers past calling here!
32839f782db3SIlpo Järvinen  */
3284832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb)
3285832d11c5SIlpo Järvinen {
32860ace2856SIlpo Järvinen 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3287832d11c5SIlpo Järvinen }
3288832d11c5SIlpo Järvinen 
3289832d11c5SIlpo Järvinen /**
3290832d11c5SIlpo Järvinen  * skb_shift - Shifts paged data partially from skb to another
3291832d11c5SIlpo Järvinen  * @tgt: buffer into which tail data gets added
3292832d11c5SIlpo Järvinen  * @skb: buffer from which the paged data comes from
3293832d11c5SIlpo Järvinen  * @shiftlen: shift up to this many bytes
3294832d11c5SIlpo Järvinen  *
3295832d11c5SIlpo Järvinen  * Attempts to shift up to shiftlen worth of bytes, which may be less than
329620e994a0SFeng King  * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3297832d11c5SIlpo Järvinen  * It's up to the caller to free skb if everything was shifted.
3298832d11c5SIlpo Järvinen  *
3299832d11c5SIlpo Järvinen  * If @tgt runs out of frags, the whole operation is aborted.
3300832d11c5SIlpo Järvinen  *
3301832d11c5SIlpo Järvinen  * The skb may contain nothing but paged data, while tgt is allowed
3302832d11c5SIlpo Järvinen  * to have non-paged data as well.
3303832d11c5SIlpo Järvinen  *
3304832d11c5SIlpo Järvinen  * TODO: a full-sized shift could be optimized, but that would need a
3305832d11c5SIlpo Järvinen  * specialized skb freer to handle frags without an up-to-date nr_frags.
3306832d11c5SIlpo Järvinen  */
3307832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3308832d11c5SIlpo Järvinen {
3309832d11c5SIlpo Järvinen 	int from, to, merge, todo;
3310d8e18a51SMatthew Wilcox (Oracle) 	skb_frag_t *fragfrom, *fragto;
3311832d11c5SIlpo Järvinen 
3312832d11c5SIlpo Järvinen 	BUG_ON(shiftlen > skb->len);
3313f8071cdeSEric Dumazet 
3314f8071cdeSEric Dumazet 	if (skb_headlen(skb))
3315f8071cdeSEric Dumazet 		return 0;
33161f8b977aSWillem de Bruijn 	if (skb_zcopy(tgt) || skb_zcopy(skb))
33171f8b977aSWillem de Bruijn 		return 0;
3318832d11c5SIlpo Järvinen 
3319832d11c5SIlpo Järvinen 	todo = shiftlen;
3320832d11c5SIlpo Järvinen 	from = 0;
3321832d11c5SIlpo Järvinen 	to = skb_shinfo(tgt)->nr_frags;
3322832d11c5SIlpo Järvinen 	fragfrom = &skb_shinfo(skb)->frags[from];
3323832d11c5SIlpo Järvinen 
3324832d11c5SIlpo Järvinen 	/* Actual merge is delayed until the point when we know we can
3325832d11c5SIlpo Järvinen 	 * commit all, so that we don't have to undo partial changes
3326832d11c5SIlpo Järvinen 	 */
3327832d11c5SIlpo Järvinen 	if (!to ||
3328ea2ab693SIan Campbell 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3329b54c9d5bSJonathan Lemon 			      skb_frag_off(fragfrom))) {
3330832d11c5SIlpo Järvinen 		merge = -1;
3331832d11c5SIlpo Järvinen 	} else {
3332832d11c5SIlpo Järvinen 		merge = to - 1;
3333832d11c5SIlpo Järvinen 
33349e903e08SEric Dumazet 		todo -= skb_frag_size(fragfrom);
3335832d11c5SIlpo Järvinen 		if (todo < 0) {
3336832d11c5SIlpo Järvinen 			if (skb_prepare_for_shift(skb) ||
3337832d11c5SIlpo Järvinen 			    skb_prepare_for_shift(tgt))
3338832d11c5SIlpo Järvinen 				return 0;
3339832d11c5SIlpo Järvinen 
33409f782db3SIlpo Järvinen 			/* All previous frag pointers might be stale! */
33419f782db3SIlpo Järvinen 			fragfrom = &skb_shinfo(skb)->frags[from];
3342832d11c5SIlpo Järvinen 			fragto = &skb_shinfo(tgt)->frags[merge];
3343832d11c5SIlpo Järvinen 
33449e903e08SEric Dumazet 			skb_frag_size_add(fragto, shiftlen);
33459e903e08SEric Dumazet 			skb_frag_size_sub(fragfrom, shiftlen);
3346b54c9d5bSJonathan Lemon 			skb_frag_off_add(fragfrom, shiftlen);
3347832d11c5SIlpo Järvinen 
3348832d11c5SIlpo Järvinen 			goto onlymerged;
3349832d11c5SIlpo Järvinen 		}
3350832d11c5SIlpo Järvinen 
3351832d11c5SIlpo Järvinen 		from++;
3352832d11c5SIlpo Järvinen 	}
3353832d11c5SIlpo Järvinen 
3354832d11c5SIlpo Järvinen 	/* Skip full, not-fitting skb to avoid expensive operations */
3355832d11c5SIlpo Järvinen 	if ((shiftlen == skb->len) &&
3356832d11c5SIlpo Järvinen 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3357832d11c5SIlpo Järvinen 		return 0;
3358832d11c5SIlpo Järvinen 
3359832d11c5SIlpo Järvinen 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3360832d11c5SIlpo Järvinen 		return 0;
3361832d11c5SIlpo Järvinen 
3362832d11c5SIlpo Järvinen 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3363832d11c5SIlpo Järvinen 		if (to == MAX_SKB_FRAGS)
3364832d11c5SIlpo Järvinen 			return 0;
3365832d11c5SIlpo Järvinen 
3366832d11c5SIlpo Järvinen 		fragfrom = &skb_shinfo(skb)->frags[from];
3367832d11c5SIlpo Järvinen 		fragto = &skb_shinfo(tgt)->frags[to];
3368832d11c5SIlpo Järvinen 
33699e903e08SEric Dumazet 		if (todo >= skb_frag_size(fragfrom)) {
3370832d11c5SIlpo Järvinen 			*fragto = *fragfrom;
33719e903e08SEric Dumazet 			todo -= skb_frag_size(fragfrom);
3372832d11c5SIlpo Järvinen 			from++;
3373832d11c5SIlpo Järvinen 			to++;
3374832d11c5SIlpo Järvinen 
3375832d11c5SIlpo Järvinen 		} else {
3376ea2ab693SIan Campbell 			__skb_frag_ref(fragfrom);
3377b54c9d5bSJonathan Lemon 			skb_frag_page_copy(fragto, fragfrom);
3378b54c9d5bSJonathan Lemon 			skb_frag_off_copy(fragto, fragfrom);
33799e903e08SEric Dumazet 			skb_frag_size_set(fragto, todo);
3380832d11c5SIlpo Järvinen 
3381b54c9d5bSJonathan Lemon 			skb_frag_off_add(fragfrom, todo);
33829e903e08SEric Dumazet 			skb_frag_size_sub(fragfrom, todo);
3383832d11c5SIlpo Järvinen 			todo = 0;
3384832d11c5SIlpo Järvinen 
3385832d11c5SIlpo Järvinen 			to++;
3386832d11c5SIlpo Järvinen 			break;
3387832d11c5SIlpo Järvinen 		}
3388832d11c5SIlpo Järvinen 	}
3389832d11c5SIlpo Järvinen 
3390832d11c5SIlpo Järvinen 	/* Ready to "commit" this state change to tgt */
3391832d11c5SIlpo Järvinen 	skb_shinfo(tgt)->nr_frags = to;
3392832d11c5SIlpo Järvinen 
3393832d11c5SIlpo Järvinen 	if (merge >= 0) {
3394832d11c5SIlpo Järvinen 		fragfrom = &skb_shinfo(skb)->frags[0];
3395832d11c5SIlpo Järvinen 		fragto = &skb_shinfo(tgt)->frags[merge];
3396832d11c5SIlpo Järvinen 
33979e903e08SEric Dumazet 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3398ea2ab693SIan Campbell 		__skb_frag_unref(fragfrom);
3399832d11c5SIlpo Järvinen 	}
3400832d11c5SIlpo Järvinen 
3401832d11c5SIlpo Järvinen 	/* Reposition in the original skb */
3402832d11c5SIlpo Järvinen 	to = 0;
3403832d11c5SIlpo Järvinen 	while (from < skb_shinfo(skb)->nr_frags)
3404832d11c5SIlpo Järvinen 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3405832d11c5SIlpo Järvinen 	skb_shinfo(skb)->nr_frags = to;
3406832d11c5SIlpo Järvinen 
3407832d11c5SIlpo Järvinen 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3408832d11c5SIlpo Järvinen 
3409832d11c5SIlpo Järvinen onlymerged:
3410832d11c5SIlpo Järvinen 	/* Most likely the tgt won't ever need its checksum anymore; skb, on
3411832d11c5SIlpo Järvinen 	 * the other hand, might need it if it needs to be resent
3412832d11c5SIlpo Järvinen 	 */
3413832d11c5SIlpo Järvinen 	tgt->ip_summed = CHECKSUM_PARTIAL;
3414832d11c5SIlpo Järvinen 	skb->ip_summed = CHECKSUM_PARTIAL;
3415832d11c5SIlpo Järvinen 
3416832d11c5SIlpo Järvinen 	/* Yak, is it really working this way? Some helper please? */
3417832d11c5SIlpo Järvinen 	skb->len -= shiftlen;
3418832d11c5SIlpo Järvinen 	skb->data_len -= shiftlen;
3419832d11c5SIlpo Järvinen 	skb->truesize -= shiftlen;
3420832d11c5SIlpo Järvinen 	tgt->len += shiftlen;
3421832d11c5SIlpo Järvinen 	tgt->data_len += shiftlen;
3422832d11c5SIlpo Järvinen 	tgt->truesize += shiftlen;
3423832d11c5SIlpo Järvinen 
3424832d11c5SIlpo Järvinen 	return shiftlen;
3425832d11c5SIlpo Järvinen }
3426832d11c5SIlpo Järvinen 
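/* Illustrative sketch (roughly how the TCP SACK collapsing code uses this
 * helper): try to move every paged byte of @skb into @prev.  skb_shift()
 * refuses buffers with linear data or zerocopy pages, so a return of 0 simply
 * means "leave things as they are".
 */
static bool shift_all_into_prev(struct sk_buff *prev, struct sk_buff *skb)
{
	int shifted = skb_shift(prev, skb, skb->len);

	/* skb_shift() already trimmed skb->len by the amount moved */
	return shifted > 0 && skb->len == 0;
}
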
3427677e90edSThomas Graf /**
3428677e90edSThomas Graf  * skb_prepare_seq_read - Prepare a sequential read of skb data
3429677e90edSThomas Graf  * @skb: the buffer to read
3430677e90edSThomas Graf  * @from: lower offset of data to be read
3431677e90edSThomas Graf  * @to: upper offset of data to be read
3432677e90edSThomas Graf  * @st: state variable
3433677e90edSThomas Graf  *
3434677e90edSThomas Graf  * Initializes the specified state variable. Must be called before
3435677e90edSThomas Graf  * invoking skb_seq_read() for the first time.
3436677e90edSThomas Graf  */
3437677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3438677e90edSThomas Graf 			  unsigned int to, struct skb_seq_state *st)
3439677e90edSThomas Graf {
3440677e90edSThomas Graf 	st->lower_offset = from;
3441677e90edSThomas Graf 	st->upper_offset = to;
3442677e90edSThomas Graf 	st->root_skb = st->cur_skb = skb;
3443677e90edSThomas Graf 	st->frag_idx = st->stepped_offset = 0;
3444677e90edSThomas Graf 	st->frag_data = NULL;
3445*97550f6fSWillem de Bruijn 	st->frag_off = 0;
3446677e90edSThomas Graf }
3447b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_prepare_seq_read);
3448677e90edSThomas Graf 
3449677e90edSThomas Graf /**
3450677e90edSThomas Graf  * skb_seq_read - Sequentially read skb data
3451677e90edSThomas Graf  * @consumed: number of bytes consumed by the caller so far
3452677e90edSThomas Graf  * @data: destination pointer for data to be returned
3453677e90edSThomas Graf  * @st: state variable
3454677e90edSThomas Graf  *
3455bc32383cSMathias Krause  * Reads a block of skb data at @consumed relative to the
3456677e90edSThomas Graf  * lower offset specified to skb_prepare_seq_read(). Assigns
3457bc32383cSMathias Krause  * the head of the data block to @data and returns the length
3458677e90edSThomas Graf  * of the block or 0 if the end of the skb data or the upper
3459677e90edSThomas Graf  * offset has been reached.
3460677e90edSThomas Graf  *
3461677e90edSThomas Graf  * The caller is not required to consume all of the data
3462bc32383cSMathias Krause  * returned, i.e. @consumed is typically set to the number
3463677e90edSThomas Graf  * of bytes already consumed and the next call to
3464677e90edSThomas Graf  * skb_seq_read() will return the remaining part of the block.
3465677e90edSThomas Graf  *
346725985edcSLucas De Marchi  * Note 1: The size of each block of data returned can be arbitrary;
3467e793c0f7SMasanari Iida  *       this limitation is the cost of zerocopy sequential
3468677e90edSThomas Graf  *       reads of potentially non-linear data.
3469677e90edSThomas Graf  *
3470bc2cda1eSRandy Dunlap  * Note 2: Fragment lists within fragments are not implemented
3471677e90edSThomas Graf  *       at the moment; state->root_skb could be replaced with
3472677e90edSThomas Graf  *       a stack for this purpose.
3473677e90edSThomas Graf  */
3474677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3475677e90edSThomas Graf 			  struct skb_seq_state *st)
3476677e90edSThomas Graf {
3477677e90edSThomas Graf 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3478677e90edSThomas Graf 	skb_frag_t *frag;
3479677e90edSThomas Graf 
3480aeb193eaSWedson Almeida Filho 	if (unlikely(abs_offset >= st->upper_offset)) {
3481aeb193eaSWedson Almeida Filho 		if (st->frag_data) {
3482aeb193eaSWedson Almeida Filho 			kunmap_atomic(st->frag_data);
3483aeb193eaSWedson Almeida Filho 			st->frag_data = NULL;
3484aeb193eaSWedson Almeida Filho 		}
3485677e90edSThomas Graf 		return 0;
3486aeb193eaSWedson Almeida Filho 	}
3487677e90edSThomas Graf 
3488677e90edSThomas Graf next_skb:
348995e3b24cSHerbert Xu 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3490677e90edSThomas Graf 
3491995b3379SThomas Chenault 	if (abs_offset < block_limit && !st->frag_data) {
349295e3b24cSHerbert Xu 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3493677e90edSThomas Graf 		return block_limit - abs_offset;
3494677e90edSThomas Graf 	}
3495677e90edSThomas Graf 
3496677e90edSThomas Graf 	if (st->frag_idx == 0 && !st->frag_data)
3497677e90edSThomas Graf 		st->stepped_offset += skb_headlen(st->cur_skb);
3498677e90edSThomas Graf 
3499677e90edSThomas Graf 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3500*97550f6fSWillem de Bruijn 		unsigned int pg_idx, pg_off, pg_sz;
3501677e90edSThomas Graf 
3502*97550f6fSWillem de Bruijn 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3503*97550f6fSWillem de Bruijn 
3504*97550f6fSWillem de Bruijn 		pg_idx = 0;
3505*97550f6fSWillem de Bruijn 		pg_off = skb_frag_off(frag);
3506*97550f6fSWillem de Bruijn 		pg_sz = skb_frag_size(frag);
3507*97550f6fSWillem de Bruijn 
3508*97550f6fSWillem de Bruijn 		if (skb_frag_must_loop(skb_frag_page(frag))) {
3509*97550f6fSWillem de Bruijn 			pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
3510*97550f6fSWillem de Bruijn 			pg_off = offset_in_page(pg_off + st->frag_off);
3511*97550f6fSWillem de Bruijn 			pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
3512*97550f6fSWillem de Bruijn 						    PAGE_SIZE - pg_off);
3513*97550f6fSWillem de Bruijn 		}
3514*97550f6fSWillem de Bruijn 
3515*97550f6fSWillem de Bruijn 		block_limit = pg_sz + st->stepped_offset;
3516677e90edSThomas Graf 		if (abs_offset < block_limit) {
3517677e90edSThomas Graf 			if (!st->frag_data)
3518*97550f6fSWillem de Bruijn 				st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
3519677e90edSThomas Graf 
3520*97550f6fSWillem de Bruijn 			*data = (u8 *)st->frag_data + pg_off +
3521677e90edSThomas Graf 				(abs_offset - st->stepped_offset);
3522677e90edSThomas Graf 
3523677e90edSThomas Graf 			return block_limit - abs_offset;
3524677e90edSThomas Graf 		}
3525677e90edSThomas Graf 
3526677e90edSThomas Graf 		if (st->frag_data) {
352751c56b00SEric Dumazet 			kunmap_atomic(st->frag_data);
3528677e90edSThomas Graf 			st->frag_data = NULL;
3529677e90edSThomas Graf 		}
3530677e90edSThomas Graf 
3531*97550f6fSWillem de Bruijn 		st->stepped_offset += pg_sz;
3532*97550f6fSWillem de Bruijn 		st->frag_off += pg_sz;
3533*97550f6fSWillem de Bruijn 		if (st->frag_off == skb_frag_size(frag)) {
3534*97550f6fSWillem de Bruijn 			st->frag_off = 0;
3535677e90edSThomas Graf 			st->frag_idx++;
3536*97550f6fSWillem de Bruijn 		}
3537677e90edSThomas Graf 	}
3538677e90edSThomas Graf 
35395b5a60daSOlaf Kirch 	if (st->frag_data) {
354051c56b00SEric Dumazet 		kunmap_atomic(st->frag_data);
35415b5a60daSOlaf Kirch 		st->frag_data = NULL;
35425b5a60daSOlaf Kirch 	}
35435b5a60daSOlaf Kirch 
354421dc3301SDavid S. Miller 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3545677e90edSThomas Graf 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
354695e3b24cSHerbert Xu 		st->frag_idx = 0;
3547677e90edSThomas Graf 		goto next_skb;
354871b3346dSShyam Iyer 	} else if (st->cur_skb->next) {
354971b3346dSShyam Iyer 		st->cur_skb = st->cur_skb->next;
355071b3346dSShyam Iyer 		st->frag_idx = 0;
3551677e90edSThomas Graf 		goto next_skb;
3552677e90edSThomas Graf 	}
3553677e90edSThomas Graf 
3554677e90edSThomas Graf 	return 0;
3555677e90edSThomas Graf }
3556b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read);
3557677e90edSThomas Graf 
3558677e90edSThomas Graf /**
3559677e90edSThomas Graf  * skb_abort_seq_read - Abort a sequential read of skb data
3560677e90edSThomas Graf  * @st: state variable
3561677e90edSThomas Graf  *
3562677e90edSThomas Graf  * Must be called if the sequential read is abandoned before
3563677e90edSThomas Graf  * skb_seq_read() has returned 0.
3564677e90edSThomas Graf  */
3565677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st)
3566677e90edSThomas Graf {
3567677e90edSThomas Graf 	if (st->frag_data)
356851c56b00SEric Dumazet 		kunmap_atomic(st->frag_data);
3569677e90edSThomas Graf }
3570b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read);
3571677e90edSThomas Graf 
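/* A minimal sketch of the intended calling pattern: iterate over bytes
 * [from, to) of a possibly non-linear skb without copying them out.
 * handle_block() stands in for whatever the caller does with each chunk.
 */
static void walk_skb_bytes(struct sk_buff *skb, unsigned int from,
			   unsigned int to,
			   void (*handle_block)(const u8 *data, unsigned int len))
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		handle_block(data, len);
		consumed += len;
	}
	/* skb_seq_read() returned 0, so its mapping is already dropped;
	 * skb_abort_seq_read(&st) would only be needed on an early exit.
	 */
}
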
35723fc7e8a6SThomas Graf #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
35733fc7e8a6SThomas Graf 
35743fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
35753fc7e8a6SThomas Graf 					  struct ts_config *conf,
35763fc7e8a6SThomas Graf 					  struct ts_state *state)
35773fc7e8a6SThomas Graf {
35783fc7e8a6SThomas Graf 	return skb_seq_read(offset, text, TS_SKB_CB(state));
35793fc7e8a6SThomas Graf }
35803fc7e8a6SThomas Graf 
35813fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
35823fc7e8a6SThomas Graf {
35833fc7e8a6SThomas Graf 	skb_abort_seq_read(TS_SKB_CB(state));
35843fc7e8a6SThomas Graf }
35853fc7e8a6SThomas Graf 
35863fc7e8a6SThomas Graf /**
35873fc7e8a6SThomas Graf  * skb_find_text - Find a text pattern in skb data
35883fc7e8a6SThomas Graf  * @skb: the buffer to look in
35893fc7e8a6SThomas Graf  * @from: search offset
35903fc7e8a6SThomas Graf  * @to: search limit
35913fc7e8a6SThomas Graf  * @config: textsearch configuration
35923fc7e8a6SThomas Graf  *
35933fc7e8a6SThomas Graf  * Finds a pattern in the skb data according to the specified
35943fc7e8a6SThomas Graf  * textsearch configuration. Use textsearch_next() to retrieve
35953fc7e8a6SThomas Graf  * subsequent occurrences of the pattern. Returns the offset
35963fc7e8a6SThomas Graf  * to the first occurrence or UINT_MAX if no match was found.
35973fc7e8a6SThomas Graf  */
35983fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3599059a2440SBojan Prtvar 			   unsigned int to, struct ts_config *config)
36003fc7e8a6SThomas Graf {
3601059a2440SBojan Prtvar 	struct ts_state state;
3602f72b948dSPhil Oester 	unsigned int ret;
3603f72b948dSPhil Oester 
36043fc7e8a6SThomas Graf 	config->get_next_block = skb_ts_get_next_block;
36053fc7e8a6SThomas Graf 	config->finish = skb_ts_finish;
36063fc7e8a6SThomas Graf 
3607059a2440SBojan Prtvar 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
36083fc7e8a6SThomas Graf 
3609059a2440SBojan Prtvar 	ret = textsearch_find(config, &state);
3610f72b948dSPhil Oester 	return (ret <= to - from ? ret : UINT_MAX);
36113fc7e8a6SThomas Graf }
3612b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text);
36133fc7e8a6SThomas Graf 
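/* Hedged usage sketch: search the whole buffer for a literal pattern via the
 * textsearch infrastructure, much as the netfilter string match does.  The
 * "kmp" algorithm name is just one of the available textsearch backends.
 */
static bool skb_contains_pattern(struct sk_buff *skb, const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);

	return pos != UINT_MAX;
}
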
3614be12a1feSHannes Frederic Sowa int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3615be12a1feSHannes Frederic Sowa 			 int offset, size_t size)
3616be12a1feSHannes Frederic Sowa {
3617be12a1feSHannes Frederic Sowa 	int i = skb_shinfo(skb)->nr_frags;
3618be12a1feSHannes Frederic Sowa 
3619be12a1feSHannes Frederic Sowa 	if (skb_can_coalesce(skb, i, page, offset)) {
3620be12a1feSHannes Frederic Sowa 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3621be12a1feSHannes Frederic Sowa 	} else if (i < MAX_SKB_FRAGS) {
3622be12a1feSHannes Frederic Sowa 		get_page(page);
3623be12a1feSHannes Frederic Sowa 		skb_fill_page_desc(skb, i, page, offset, size);
3624be12a1feSHannes Frederic Sowa 	} else {
3625be12a1feSHannes Frederic Sowa 		return -EMSGSIZE;
3626be12a1feSHannes Frederic Sowa 	}
3627be12a1feSHannes Frederic Sowa 
3628be12a1feSHannes Frederic Sowa 	return 0;
3629be12a1feSHannes Frederic Sowa }
3630be12a1feSHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3631be12a1feSHannes Frederic Sowa 
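/* Illustrative sketch: hang @size bytes of @page off @skb and account for
 * them, roughly what sendpage-style callers do after a successful append.
 * Reference counting and frag coalescing are handled inside
 * skb_append_pagefrags() itself.
 */
static int add_page_to_skb(struct sk_buff *skb, struct page *page,
			   int offset, size_t size)
{
	int err = skb_append_pagefrags(skb, page, offset, size);

	if (err)
		return err;		/* -EMSGSIZE: no frag slot left */

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}
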
3632cbb042f9SHerbert Xu /**
3633cbb042f9SHerbert Xu  *	skb_pull_rcsum - pull skb and update receive checksum
3634cbb042f9SHerbert Xu  *	@skb: buffer to update
3635cbb042f9SHerbert Xu  *	@len: length of data pulled
3636cbb042f9SHerbert Xu  *
3637cbb042f9SHerbert Xu  *	This function performs an skb_pull on the packet and updates
3638fee54fa5SUrs Thuermann  *	the CHECKSUM_COMPLETE checksum.  It should be used on
363984fa7933SPatrick McHardy  *	receive path processing instead of skb_pull unless you know
364084fa7933SPatrick McHardy  *	that the checksum difference is zero (e.g., a valid IP header)
364184fa7933SPatrick McHardy  *	or you are setting ip_summed to CHECKSUM_NONE.
3642cbb042f9SHerbert Xu  */
3643af72868bSJohannes Berg void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3644cbb042f9SHerbert Xu {
364531b33dfbSPravin B Shelar 	unsigned char *data = skb->data;
364631b33dfbSPravin B Shelar 
3647cbb042f9SHerbert Xu 	BUG_ON(len > skb->len);
364831b33dfbSPravin B Shelar 	__skb_pull(skb, len);
364931b33dfbSPravin B Shelar 	skb_postpull_rcsum(skb, data, len);
365031b33dfbSPravin B Shelar 	return skb->data;
3651cbb042f9SHerbert Xu }
3652f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum);
3653f94691acSArnaldo Carvalho de Melo 
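/* Minimal receive-path sketch: strip an outer header of @hdr_len bytes while
 * keeping a CHECKSUM_COMPLETE value consistent.  Callers that already know
 * the header is present in the linear area can skip the pskb_may_pull().
 */
static bool strip_outer_header(struct sk_buff *skb, unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len))
		return false;

	skb_pull_rcsum(skb, hdr_len);
	return true;
}
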
365413acc94eSYonghong Song static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
365513acc94eSYonghong Song {
365613acc94eSYonghong Song 	skb_frag_t head_frag;
365713acc94eSYonghong Song 	struct page *page;
365813acc94eSYonghong Song 
365913acc94eSYonghong Song 	page = virt_to_head_page(frag_skb->head);
3660d8e18a51SMatthew Wilcox (Oracle) 	__skb_frag_set_page(&head_frag, page);
3661b54c9d5bSJonathan Lemon 	skb_frag_off_set(&head_frag, frag_skb->data -
3662b54c9d5bSJonathan Lemon 			 (unsigned char *)page_address(page));
3663d8e18a51SMatthew Wilcox (Oracle) 	skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
366413acc94eSYonghong Song 	return head_frag;
366513acc94eSYonghong Song }
366613acc94eSYonghong Song 
36673a1296a3SSteffen Klassert struct sk_buff *skb_segment_list(struct sk_buff *skb,
36683a1296a3SSteffen Klassert 				 netdev_features_t features,
36693a1296a3SSteffen Klassert 				 unsigned int offset)
36703a1296a3SSteffen Klassert {
36713a1296a3SSteffen Klassert 	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
36723a1296a3SSteffen Klassert 	unsigned int tnl_hlen = skb_tnl_header_len(skb);
36733a1296a3SSteffen Klassert 	unsigned int delta_truesize = 0;
36743a1296a3SSteffen Klassert 	unsigned int delta_len = 0;
36753a1296a3SSteffen Klassert 	struct sk_buff *tail = NULL;
367653475c5dSDongseok Yi 	struct sk_buff *nskb, *tmp;
367753475c5dSDongseok Yi 	int err;
36783a1296a3SSteffen Klassert 
36793a1296a3SSteffen Klassert 	skb_push(skb, -skb_network_offset(skb) + offset);
36803a1296a3SSteffen Klassert 
36813a1296a3SSteffen Klassert 	skb_shinfo(skb)->frag_list = NULL;
36823a1296a3SSteffen Klassert 
36833a1296a3SSteffen Klassert 	do {
36843a1296a3SSteffen Klassert 		nskb = list_skb;
36853a1296a3SSteffen Klassert 		list_skb = list_skb->next;
36863a1296a3SSteffen Klassert 
368753475c5dSDongseok Yi 		err = 0;
368853475c5dSDongseok Yi 		if (skb_shared(nskb)) {
368953475c5dSDongseok Yi 			tmp = skb_clone(nskb, GFP_ATOMIC);
369053475c5dSDongseok Yi 			if (tmp) {
369153475c5dSDongseok Yi 				consume_skb(nskb);
369253475c5dSDongseok Yi 				nskb = tmp;
369353475c5dSDongseok Yi 				err = skb_unclone(nskb, GFP_ATOMIC);
369453475c5dSDongseok Yi 			} else {
369553475c5dSDongseok Yi 				err = -ENOMEM;
369653475c5dSDongseok Yi 			}
369753475c5dSDongseok Yi 		}
369853475c5dSDongseok Yi 
36993a1296a3SSteffen Klassert 		if (!tail)
37003a1296a3SSteffen Klassert 			skb->next = nskb;
37013a1296a3SSteffen Klassert 		else
37023a1296a3SSteffen Klassert 			tail->next = nskb;
37033a1296a3SSteffen Klassert 
370453475c5dSDongseok Yi 		if (unlikely(err)) {
370553475c5dSDongseok Yi 			nskb->next = list_skb;
370653475c5dSDongseok Yi 			goto err_linearize;
370753475c5dSDongseok Yi 		}
370853475c5dSDongseok Yi 
37093a1296a3SSteffen Klassert 		tail = nskb;
37103a1296a3SSteffen Klassert 
37113a1296a3SSteffen Klassert 		delta_len += nskb->len;
37123a1296a3SSteffen Klassert 		delta_truesize += nskb->truesize;
37133a1296a3SSteffen Klassert 
37143a1296a3SSteffen Klassert 		skb_push(nskb, -skb_network_offset(nskb) + offset);
37153a1296a3SSteffen Klassert 
3716cf673ed0SFlorian Westphal 		skb_release_head_state(nskb);
37173a1296a3SSteffen Klassert 		 __copy_skb_header(nskb, skb);
37183a1296a3SSteffen Klassert 
37193a1296a3SSteffen Klassert 		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
37203a1296a3SSteffen Klassert 		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
37213a1296a3SSteffen Klassert 						 nskb->data - tnl_hlen,
37223a1296a3SSteffen Klassert 						 offset + tnl_hlen);
37233a1296a3SSteffen Klassert 
37243a1296a3SSteffen Klassert 		if (skb_needs_linearize(nskb, features) &&
37253a1296a3SSteffen Klassert 		    __skb_linearize(nskb))
37263a1296a3SSteffen Klassert 			goto err_linearize;
37273a1296a3SSteffen Klassert 
37283a1296a3SSteffen Klassert 	} while (list_skb);
37293a1296a3SSteffen Klassert 
37303a1296a3SSteffen Klassert 	skb->truesize = skb->truesize - delta_truesize;
37313a1296a3SSteffen Klassert 	skb->data_len = skb->data_len - delta_len;
37323a1296a3SSteffen Klassert 	skb->len = skb->len - delta_len;
37333a1296a3SSteffen Klassert 
37343a1296a3SSteffen Klassert 	skb_gso_reset(skb);
37353a1296a3SSteffen Klassert 
37363a1296a3SSteffen Klassert 	skb->prev = tail;
37373a1296a3SSteffen Klassert 
37383a1296a3SSteffen Klassert 	if (skb_needs_linearize(skb, features) &&
37393a1296a3SSteffen Klassert 	    __skb_linearize(skb))
37403a1296a3SSteffen Klassert 		goto err_linearize;
37413a1296a3SSteffen Klassert 
37423a1296a3SSteffen Klassert 	skb_get(skb);
37433a1296a3SSteffen Klassert 
37443a1296a3SSteffen Klassert 	return skb;
37453a1296a3SSteffen Klassert 
37463a1296a3SSteffen Klassert err_linearize:
37473a1296a3SSteffen Klassert 	kfree_skb_list(skb->next);
37483a1296a3SSteffen Klassert 	skb->next = NULL;
37493a1296a3SSteffen Klassert 	return ERR_PTR(-ENOMEM);
37503a1296a3SSteffen Klassert }
37513a1296a3SSteffen Klassert EXPORT_SYMBOL_GPL(skb_segment_list);
37523a1296a3SSteffen Klassert 
37533a1296a3SSteffen Klassert int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
37543a1296a3SSteffen Klassert {
37553a1296a3SSteffen Klassert 	if (unlikely(p->len + skb->len >= 65536))
37563a1296a3SSteffen Klassert 		return -E2BIG;
37573a1296a3SSteffen Klassert 
37583a1296a3SSteffen Klassert 	if (NAPI_GRO_CB(p)->last == p)
37593a1296a3SSteffen Klassert 		skb_shinfo(p)->frag_list = skb;
37603a1296a3SSteffen Klassert 	else
37613a1296a3SSteffen Klassert 		NAPI_GRO_CB(p)->last->next = skb;
37623a1296a3SSteffen Klassert 
37633a1296a3SSteffen Klassert 	skb_pull(skb, skb_gro_offset(skb));
37643a1296a3SSteffen Klassert 
37653a1296a3SSteffen Klassert 	NAPI_GRO_CB(p)->last = skb;
37663a1296a3SSteffen Klassert 	NAPI_GRO_CB(p)->count++;
37673a1296a3SSteffen Klassert 	p->data_len += skb->len;
37683a1296a3SSteffen Klassert 	p->truesize += skb->truesize;
37693a1296a3SSteffen Klassert 	p->len += skb->len;
37703a1296a3SSteffen Klassert 
37713a1296a3SSteffen Klassert 	NAPI_GRO_CB(skb)->same_flow = 1;
37723a1296a3SSteffen Klassert 
37733a1296a3SSteffen Klassert 	return 0;
37743a1296a3SSteffen Klassert }
37753a1296a3SSteffen Klassert 
3776f4c50d99SHerbert Xu /**
3777f4c50d99SHerbert Xu  *	skb_segment - Perform protocol segmentation on skb.
3778df5771ffSMichael S. Tsirkin  *	@head_skb: buffer to segment
3779576a30ebSHerbert Xu  *	@features: features for the output path (see dev->features)
3780f4c50d99SHerbert Xu  *
3781f4c50d99SHerbert Xu  *	This function performs segmentation on the given skb.  It returns
37824c821d75SBen Hutchings  *	a pointer to the first in a list of new skbs for the segments.
37834c821d75SBen Hutchings  *	In case of error it returns ERR_PTR(err).
3784f4c50d99SHerbert Xu  */
3785df5771ffSMichael S. Tsirkin struct sk_buff *skb_segment(struct sk_buff *head_skb,
3786df5771ffSMichael S. Tsirkin 			    netdev_features_t features)
3787f4c50d99SHerbert Xu {
3788f4c50d99SHerbert Xu 	struct sk_buff *segs = NULL;
3789f4c50d99SHerbert Xu 	struct sk_buff *tail = NULL;
37901a4cedafSMichael S. Tsirkin 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3791df5771ffSMichael S. Tsirkin 	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3792df5771ffSMichael S. Tsirkin 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
3793df5771ffSMichael S. Tsirkin 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
37941fd819ecSMichael S. Tsirkin 	struct sk_buff *frag_skb = head_skb;
3795f4c50d99SHerbert Xu 	unsigned int offset = doffset;
3796df5771ffSMichael S. Tsirkin 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3797802ab55aSAlexander Duyck 	unsigned int partial_segs = 0;
3798f4c50d99SHerbert Xu 	unsigned int headroom;
3799802ab55aSAlexander Duyck 	unsigned int len = head_skb->len;
3800ec5f0615SPravin B Shelar 	__be16 proto;
380136c98382SAlexander Duyck 	bool csum, sg;
3802df5771ffSMichael S. Tsirkin 	int nfrags = skb_shinfo(head_skb)->nr_frags;
3803f4c50d99SHerbert Xu 	int err = -ENOMEM;
3804f4c50d99SHerbert Xu 	int i = 0;
3805f4c50d99SHerbert Xu 	int pos;
3806f4c50d99SHerbert Xu 
38073dcbdb13SShmulik Ladkani 	if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
38083dcbdb13SShmulik Ladkani 	    (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
38093dcbdb13SShmulik Ladkani 		/* gso_size is untrusted, and we have a frag_list with a linear
38103dcbdb13SShmulik Ladkani 		 * non head_frag head.
38113dcbdb13SShmulik Ladkani 		 *
38123dcbdb13SShmulik Ladkani 		 * (we assume checking the first list_skb member suffices;
38133dcbdb13SShmulik Ladkani 		 * i.e if either of the list_skb members have non head_frag
38143dcbdb13SShmulik Ladkani 		 * i.e. if any of the list_skb members has a non-head_frag
38153dcbdb13SShmulik Ladkani 		 * head, then the first one does too).
38163dcbdb13SShmulik Ladkani 		 * If head_skb's headlen does not fit requested gso_size, it
38173dcbdb13SShmulik Ladkani 		 * means that the frag_list members do NOT terminate on exact
38183dcbdb13SShmulik Ladkani 		 * gso_size boundaries. Hence we cannot perform skb_frag_t page
38193dcbdb13SShmulik Ladkani 		 * sharing. Therefore we must fallback to copying the frag_list
38203dcbdb13SShmulik Ladkani 		 * skbs; we do so by disabling SG.
38213dcbdb13SShmulik Ladkani 		 */
38223dcbdb13SShmulik Ladkani 		if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
38233dcbdb13SShmulik Ladkani 			features &= ~NETIF_F_SG;
38243dcbdb13SShmulik Ladkani 	}
38253dcbdb13SShmulik Ladkani 
38265882a07cSWei-Chun Chao 	__skb_push(head_skb, doffset);
38272f631133SMiaohe Lin 	proto = skb_network_protocol(head_skb, NULL);
3828ec5f0615SPravin B Shelar 	if (unlikely(!proto))
3829ec5f0615SPravin B Shelar 		return ERR_PTR(-EINVAL);
3830ec5f0615SPravin B Shelar 
383136c98382SAlexander Duyck 	sg = !!(features & NETIF_F_SG);
3832f245d079SAlexander Duyck 	csum = !!can_checksum_protocol(features, proto);
38337e2b10c1STom Herbert 
383407b26c94SSteffen Klassert 	if (sg && csum && (mss != GSO_BY_FRAGS))  {
383507b26c94SSteffen Klassert 		if (!(features & NETIF_F_GSO_PARTIAL)) {
383607b26c94SSteffen Klassert 			struct sk_buff *iter;
383743170c4eSIlan Tayari 			unsigned int frag_len;
383807b26c94SSteffen Klassert 
383907b26c94SSteffen Klassert 			if (!list_skb ||
384007b26c94SSteffen Klassert 			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
384107b26c94SSteffen Klassert 				goto normal;
384207b26c94SSteffen Klassert 
384343170c4eSIlan Tayari 			/* If we get here then all the required
384443170c4eSIlan Tayari 			 * GSO features except frag_list are supported.
384543170c4eSIlan Tayari 			 * Try to split the SKB to multiple GSO SKBs
384643170c4eSIlan Tayari 			 * with no frag_list.
384743170c4eSIlan Tayari 			 * Currently we can do that only when the buffers don't
384843170c4eSIlan Tayari 			 * have a linear part and all the buffers except
384943170c4eSIlan Tayari 			 * the last are of the same length.
385007b26c94SSteffen Klassert 			 */
385143170c4eSIlan Tayari 			frag_len = list_skb->len;
385207b26c94SSteffen Klassert 			skb_walk_frags(head_skb, iter) {
385343170c4eSIlan Tayari 				if (frag_len != iter->len && iter->next)
385443170c4eSIlan Tayari 					goto normal;
3855eaffadbbSIlan Tayari 				if (skb_headlen(iter) && !iter->head_frag)
385607b26c94SSteffen Klassert 					goto normal;
385707b26c94SSteffen Klassert 
385807b26c94SSteffen Klassert 				len -= iter->len;
385907b26c94SSteffen Klassert 			}
386043170c4eSIlan Tayari 
386143170c4eSIlan Tayari 			if (len != frag_len)
386243170c4eSIlan Tayari 				goto normal;
386307b26c94SSteffen Klassert 		}
386407b26c94SSteffen Klassert 
3865802ab55aSAlexander Duyck 		/* GSO partial only requires that we trim off any excess that
3866802ab55aSAlexander Duyck 		 * doesn't fit into an MSS-sized block, so take care of that
3867802ab55aSAlexander Duyck 		 * now.
3868802ab55aSAlexander Duyck 		 */
3869802ab55aSAlexander Duyck 		partial_segs = len / mss;
3870d7fb5a80SAlexander Duyck 		if (partial_segs > 1)
3871802ab55aSAlexander Duyck 			mss *= partial_segs;
3872d7fb5a80SAlexander Duyck 		else
3873d7fb5a80SAlexander Duyck 			partial_segs = 0;
3874802ab55aSAlexander Duyck 	}
3875802ab55aSAlexander Duyck 
387607b26c94SSteffen Klassert normal:
3877df5771ffSMichael S. Tsirkin 	headroom = skb_headroom(head_skb);
3878df5771ffSMichael S. Tsirkin 	pos = skb_headlen(head_skb);
3879f4c50d99SHerbert Xu 
3880f4c50d99SHerbert Xu 	do {
3881f4c50d99SHerbert Xu 		struct sk_buff *nskb;
38828cb19905SMichael S. Tsirkin 		skb_frag_t *nskb_frag;
3883c8884eddSHerbert Xu 		int hsize;
3884f4c50d99SHerbert Xu 		int size;
3885f4c50d99SHerbert Xu 
38863953c46cSMarcelo Ricardo Leitner 		if (unlikely(mss == GSO_BY_FRAGS)) {
38873953c46cSMarcelo Ricardo Leitner 			len = list_skb->len;
38883953c46cSMarcelo Ricardo Leitner 		} else {
3889df5771ffSMichael S. Tsirkin 			len = head_skb->len - offset;
3890f4c50d99SHerbert Xu 			if (len > mss)
3891f4c50d99SHerbert Xu 				len = mss;
38923953c46cSMarcelo Ricardo Leitner 		}
3893f4c50d99SHerbert Xu 
3894df5771ffSMichael S. Tsirkin 		hsize = skb_headlen(head_skb) - offset;
3895f4c50d99SHerbert Xu 		if (hsize < 0)
3896f4c50d99SHerbert Xu 			hsize = 0;
3897c8884eddSHerbert Xu 		if (hsize > len || !sg)
3898c8884eddSHerbert Xu 			hsize = len;
3899f4c50d99SHerbert Xu 
39001a4cedafSMichael S. Tsirkin 		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
39011a4cedafSMichael S. Tsirkin 		    (skb_headlen(list_skb) == len || sg)) {
39021a4cedafSMichael S. Tsirkin 			BUG_ON(skb_headlen(list_skb) > len);
390389319d38SHerbert Xu 
39049d8506ccSHerbert Xu 			i = 0;
39051a4cedafSMichael S. Tsirkin 			nfrags = skb_shinfo(list_skb)->nr_frags;
39061a4cedafSMichael S. Tsirkin 			frag = skb_shinfo(list_skb)->frags;
39071fd819ecSMichael S. Tsirkin 			frag_skb = list_skb;
39081a4cedafSMichael S. Tsirkin 			pos += skb_headlen(list_skb);
39099d8506ccSHerbert Xu 
39109d8506ccSHerbert Xu 			while (pos < offset + len) {
39119d8506ccSHerbert Xu 				BUG_ON(i >= nfrags);
39129d8506ccSHerbert Xu 
39134e1beba1SMichael S. Tsirkin 				size = skb_frag_size(frag);
39149d8506ccSHerbert Xu 				if (pos + size > offset + len)
39159d8506ccSHerbert Xu 					break;
39169d8506ccSHerbert Xu 
39179d8506ccSHerbert Xu 				i++;
39189d8506ccSHerbert Xu 				pos += size;
39194e1beba1SMichael S. Tsirkin 				frag++;
39209d8506ccSHerbert Xu 			}
39219d8506ccSHerbert Xu 
39221a4cedafSMichael S. Tsirkin 			nskb = skb_clone(list_skb, GFP_ATOMIC);
39231a4cedafSMichael S. Tsirkin 			list_skb = list_skb->next;
392489319d38SHerbert Xu 
3925f4c50d99SHerbert Xu 			if (unlikely(!nskb))
3926f4c50d99SHerbert Xu 				goto err;
3927f4c50d99SHerbert Xu 
39289d8506ccSHerbert Xu 			if (unlikely(pskb_trim(nskb, len))) {
39299d8506ccSHerbert Xu 				kfree_skb(nskb);
39309d8506ccSHerbert Xu 				goto err;
39319d8506ccSHerbert Xu 			}
39329d8506ccSHerbert Xu 
3933ec47ea82SAlexander Duyck 			hsize = skb_end_offset(nskb);
393489319d38SHerbert Xu 			if (skb_cow_head(nskb, doffset + headroom)) {
393589319d38SHerbert Xu 				kfree_skb(nskb);
393689319d38SHerbert Xu 				goto err;
393789319d38SHerbert Xu 			}
393889319d38SHerbert Xu 
3939ec47ea82SAlexander Duyck 			nskb->truesize += skb_end_offset(nskb) - hsize;
394089319d38SHerbert Xu 			skb_release_head_state(nskb);
394189319d38SHerbert Xu 			__skb_push(nskb, doffset);
394289319d38SHerbert Xu 		} else {
3943c93bdd0eSMel Gorman 			nskb = __alloc_skb(hsize + doffset + headroom,
3944df5771ffSMichael S. Tsirkin 					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3945c93bdd0eSMel Gorman 					   NUMA_NO_NODE);
394689319d38SHerbert Xu 
394789319d38SHerbert Xu 			if (unlikely(!nskb))
394889319d38SHerbert Xu 				goto err;
394989319d38SHerbert Xu 
395089319d38SHerbert Xu 			skb_reserve(nskb, headroom);
395189319d38SHerbert Xu 			__skb_put(nskb, doffset);
395289319d38SHerbert Xu 		}
395389319d38SHerbert Xu 
3954f4c50d99SHerbert Xu 		if (segs)
3955f4c50d99SHerbert Xu 			tail->next = nskb;
3956f4c50d99SHerbert Xu 		else
3957f4c50d99SHerbert Xu 			segs = nskb;
3958f4c50d99SHerbert Xu 		tail = nskb;
3959f4c50d99SHerbert Xu 
3960df5771ffSMichael S. Tsirkin 		__copy_skb_header(nskb, head_skb);
3961f4c50d99SHerbert Xu 
3962030737bcSEric Dumazet 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3963fcdfe3a7SVlad Yasevich 		skb_reset_mac_len(nskb);
396468c33163SPravin B Shelar 
3965df5771ffSMichael S. Tsirkin 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
396668c33163SPravin B Shelar 						 nskb->data - tnl_hlen,
396768c33163SPravin B Shelar 						 doffset + tnl_hlen);
396889319d38SHerbert Xu 
39699d8506ccSHerbert Xu 		if (nskb->len == len + doffset)
39701cdbcb79SSimon Horman 			goto perform_csum_check;
397189319d38SHerbert Xu 
39727fbeffedSAlexander Duyck 		if (!sg) {
39731454c9faSYadu Kishore 			if (!csum) {
39747fbeffedSAlexander Duyck 				if (!nskb->remcsum_offload)
39756f85a124SHerbert Xu 					nskb->ip_summed = CHECKSUM_NONE;
397676443456SAlexander Duyck 				SKB_GSO_CB(nskb)->csum =
397776443456SAlexander Duyck 					skb_copy_and_csum_bits(head_skb, offset,
39781454c9faSYadu Kishore 							       skb_put(nskb,
39791454c9faSYadu Kishore 								       len),
39808d5930dfSAl Viro 							       len);
39817e2b10c1STom Herbert 				SKB_GSO_CB(nskb)->csum_start =
3982de843723STom Herbert 					skb_headroom(nskb) + doffset;
39831454c9faSYadu Kishore 			} else {
39841454c9faSYadu Kishore 				skb_copy_bits(head_skb, offset,
39851454c9faSYadu Kishore 					      skb_put(nskb, len),
39861454c9faSYadu Kishore 					      len);
39871454c9faSYadu Kishore 			}
3988f4c50d99SHerbert Xu 			continue;
3989f4c50d99SHerbert Xu 		}
3990f4c50d99SHerbert Xu 
39918cb19905SMichael S. Tsirkin 		nskb_frag = skb_shinfo(nskb)->frags;
3992f4c50d99SHerbert Xu 
3993df5771ffSMichael S. Tsirkin 		skb_copy_from_linear_data_offset(head_skb, offset,
3994d626f62bSArnaldo Carvalho de Melo 						 skb_put(nskb, hsize), hsize);
3995f4c50d99SHerbert Xu 
3996fff88030SWillem de Bruijn 		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3997df5771ffSMichael S. Tsirkin 					      SKBTX_SHARED_FRAG;
3998cef401deSEric Dumazet 
3999bf5c25d6SWillem de Bruijn 		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
4000bf5c25d6SWillem de Bruijn 		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4001bf5c25d6SWillem de Bruijn 			goto err;
4002bf5c25d6SWillem de Bruijn 
40039d8506ccSHerbert Xu 		while (pos < offset + len) {
40049d8506ccSHerbert Xu 			if (i >= nfrags) {
40059d8506ccSHerbert Xu 				i = 0;
40061a4cedafSMichael S. Tsirkin 				nfrags = skb_shinfo(list_skb)->nr_frags;
40071a4cedafSMichael S. Tsirkin 				frag = skb_shinfo(list_skb)->frags;
40081fd819ecSMichael S. Tsirkin 				frag_skb = list_skb;
400913acc94eSYonghong Song 				if (!skb_headlen(list_skb)) {
40109d8506ccSHerbert Xu 					BUG_ON(!nfrags);
401113acc94eSYonghong Song 				} else {
401213acc94eSYonghong Song 					BUG_ON(!list_skb->head_frag);
40139d8506ccSHerbert Xu 
401413acc94eSYonghong Song 					/* to make room for head_frag. */
401513acc94eSYonghong Song 					i--;
401613acc94eSYonghong Song 					frag--;
401713acc94eSYonghong Song 				}
4018bf5c25d6SWillem de Bruijn 				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
4019bf5c25d6SWillem de Bruijn 				    skb_zerocopy_clone(nskb, frag_skb,
4020bf5c25d6SWillem de Bruijn 						       GFP_ATOMIC))
4021bf5c25d6SWillem de Bruijn 					goto err;
4022bf5c25d6SWillem de Bruijn 
40231a4cedafSMichael S. Tsirkin 				list_skb = list_skb->next;
40249d8506ccSHerbert Xu 			}
40259d8506ccSHerbert Xu 
40269d8506ccSHerbert Xu 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
40279d8506ccSHerbert Xu 				     MAX_SKB_FRAGS)) {
40289d8506ccSHerbert Xu 				net_warn_ratelimited(
40299d8506ccSHerbert Xu 					"skb_segment: too many frags: %u %u\n",
40309d8506ccSHerbert Xu 					pos, mss);
4031ff907a11SEric Dumazet 				err = -EINVAL;
40329d8506ccSHerbert Xu 				goto err;
40339d8506ccSHerbert Xu 			}
40349d8506ccSHerbert Xu 
403513acc94eSYonghong Song 			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
40368cb19905SMichael S. Tsirkin 			__skb_frag_ref(nskb_frag);
40378cb19905SMichael S. Tsirkin 			size = skb_frag_size(nskb_frag);
4038f4c50d99SHerbert Xu 
4039f4c50d99SHerbert Xu 			if (pos < offset) {
4040b54c9d5bSJonathan Lemon 				skb_frag_off_add(nskb_frag, offset - pos);
40418cb19905SMichael S. Tsirkin 				skb_frag_size_sub(nskb_frag, offset - pos);
4042f4c50d99SHerbert Xu 			}
4043f4c50d99SHerbert Xu 
404489319d38SHerbert Xu 			skb_shinfo(nskb)->nr_frags++;
4045f4c50d99SHerbert Xu 
4046f4c50d99SHerbert Xu 			if (pos + size <= offset + len) {
4047f4c50d99SHerbert Xu 				i++;
40484e1beba1SMichael S. Tsirkin 				frag++;
4049f4c50d99SHerbert Xu 				pos += size;
4050f4c50d99SHerbert Xu 			} else {
40518cb19905SMichael S. Tsirkin 				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
405289319d38SHerbert Xu 				goto skip_fraglist;
4053f4c50d99SHerbert Xu 			}
4054f4c50d99SHerbert Xu 
40558cb19905SMichael S. Tsirkin 			nskb_frag++;
4056f4c50d99SHerbert Xu 		}
4057f4c50d99SHerbert Xu 
405889319d38SHerbert Xu skip_fraglist:
4059f4c50d99SHerbert Xu 		nskb->data_len = len - hsize;
4060f4c50d99SHerbert Xu 		nskb->len += nskb->data_len;
4061f4c50d99SHerbert Xu 		nskb->truesize += nskb->data_len;
4062ec5f0615SPravin B Shelar 
40631cdbcb79SSimon Horman perform_csum_check:
40647fbeffedSAlexander Duyck 		if (!csum) {
4065ff907a11SEric Dumazet 			if (skb_has_shared_frag(nskb) &&
4066ff907a11SEric Dumazet 			    __skb_linearize(nskb))
4067ddff00d4SAlexander Duyck 				goto err;
4068ff907a11SEric Dumazet 
40697fbeffedSAlexander Duyck 			if (!nskb->remcsum_offload)
4070ec5f0615SPravin B Shelar 				nskb->ip_summed = CHECKSUM_NONE;
407176443456SAlexander Duyck 			SKB_GSO_CB(nskb)->csum =
407276443456SAlexander Duyck 				skb_checksum(nskb, doffset,
407376443456SAlexander Duyck 					     nskb->len - doffset, 0);
40747e2b10c1STom Herbert 			SKB_GSO_CB(nskb)->csum_start =
40757e2b10c1STom Herbert 				skb_headroom(nskb) + doffset;
4076ec5f0615SPravin B Shelar 		}
4077df5771ffSMichael S. Tsirkin 	} while ((offset += len) < head_skb->len);
4078f4c50d99SHerbert Xu 
4079bec3cfdcSEric Dumazet 	/* Some callers want to get the end of the list.
4080bec3cfdcSEric Dumazet 	 * Put it in segs->prev to avoid walking the list.
4081bec3cfdcSEric Dumazet 	 * (see validate_xmit_skb_list() for example)
4082bec3cfdcSEric Dumazet 	 */
4083bec3cfdcSEric Dumazet 	segs->prev = tail;
4084432c856fSToshiaki Makita 
4085802ab55aSAlexander Duyck 	if (partial_segs) {
408607b26c94SSteffen Klassert 		struct sk_buff *iter;
4087802ab55aSAlexander Duyck 		int type = skb_shinfo(head_skb)->gso_type;
408807b26c94SSteffen Klassert 		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
4089802ab55aSAlexander Duyck 
4090802ab55aSAlexander Duyck 		/* Update type to add partial and then remove dodgy if set */
409107b26c94SSteffen Klassert 		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
4092802ab55aSAlexander Duyck 		type &= ~SKB_GSO_DODGY;
4093802ab55aSAlexander Duyck 
4094802ab55aSAlexander Duyck 		/* Update GSO info and prepare to start updating headers on
4095802ab55aSAlexander Duyck 		 * our way back down the stack of protocols.
4096802ab55aSAlexander Duyck 		 */
409707b26c94SSteffen Klassert 		for (iter = segs; iter; iter = iter->next) {
409807b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_size = gso_size;
409907b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_segs = partial_segs;
410007b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_type = type;
410107b26c94SSteffen Klassert 			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
410207b26c94SSteffen Klassert 		}
410307b26c94SSteffen Klassert 
410407b26c94SSteffen Klassert 		if (tail->len - doffset <= gso_size)
410507b26c94SSteffen Klassert 			skb_shinfo(tail)->gso_size = 0;
410607b26c94SSteffen Klassert 		else if (tail != segs)
410707b26c94SSteffen Klassert 			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
4108802ab55aSAlexander Duyck 	}
4109802ab55aSAlexander Duyck 
4110432c856fSToshiaki Makita 	/* The following permits correct backpressure for protocols
4111432c856fSToshiaki Makita 	 * using skb_set_owner_w().
4112432c856fSToshiaki Makita 	 * The idea is to transfer ownership from head_skb to the last segment.
4113432c856fSToshiaki Makita 	 */
4114432c856fSToshiaki Makita 	if (head_skb->destructor == sock_wfree) {
4115432c856fSToshiaki Makita 		swap(tail->truesize, head_skb->truesize);
4116432c856fSToshiaki Makita 		swap(tail->destructor, head_skb->destructor);
4117432c856fSToshiaki Makita 		swap(tail->sk, head_skb->sk);
4118432c856fSToshiaki Makita 	}
4119f4c50d99SHerbert Xu 	return segs;
4120f4c50d99SHerbert Xu 
4121f4c50d99SHerbert Xu err:
4122289dccbeSEric Dumazet 	kfree_skb_list(segs);
4123f4c50d99SHerbert Xu 	return ERR_PTR(err);
4124f4c50d99SHerbert Xu }
4125f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment);
4126f4c50d99SHerbert Xu 
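/* Hedged sketch of how a protocol's gso_segment callback typically wraps
 * skb_segment(): the caller has already pushed the headers, and each
 * resulting segment still needs its protocol headers patched up (supplied
 * here via the caller's @fixup hook, which is purely illustrative).
 */
static struct sk_buff *example_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   void (*fixup)(struct sk_buff *seg))
{
	struct sk_buff *segs, *seg;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return segs;

	for (seg = segs; seg; seg = seg->next)
		fixup(seg);	/* per-segment header fixup */

	return segs;	/* singly linked via ->next; tail cached in segs->prev */
}
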
4127d4546c25SDavid Miller int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
412871d93b39SHerbert Xu {
41298a29111cSEric Dumazet 	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
413067147ba9SHerbert Xu 	unsigned int offset = skb_gro_offset(skb);
413167147ba9SHerbert Xu 	unsigned int headlen = skb_headlen(skb);
41328a29111cSEric Dumazet 	unsigned int len = skb_gro_len(skb);
4133715dc1f3SEric Dumazet 	unsigned int delta_truesize;
4134d4546c25SDavid Miller 	struct sk_buff *lp;
413571d93b39SHerbert Xu 
41360ab03f35SSteffen Klassert 	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
413771d93b39SHerbert Xu 		return -E2BIG;
413871d93b39SHerbert Xu 
413929e98242SEric Dumazet 	lp = NAPI_GRO_CB(p)->last;
41408a29111cSEric Dumazet 	pinfo = skb_shinfo(lp);
41418a29111cSEric Dumazet 
41428a29111cSEric Dumazet 	if (headlen <= offset) {
414342da6994SHerbert Xu 		skb_frag_t *frag;
414466e92fcfSHerbert Xu 		skb_frag_t *frag2;
41459aaa156cSHerbert Xu 		int i = skbinfo->nr_frags;
41469aaa156cSHerbert Xu 		int nr_frags = pinfo->nr_frags + i;
414742da6994SHerbert Xu 
414866e92fcfSHerbert Xu 		if (nr_frags > MAX_SKB_FRAGS)
41498a29111cSEric Dumazet 			goto merge;
415081705ad1SHerbert Xu 
41518a29111cSEric Dumazet 		offset -= headlen;
41529aaa156cSHerbert Xu 		pinfo->nr_frags = nr_frags;
41539aaa156cSHerbert Xu 		skbinfo->nr_frags = 0;
4154f5572068SHerbert Xu 
41559aaa156cSHerbert Xu 		frag = pinfo->frags + nr_frags;
41569aaa156cSHerbert Xu 		frag2 = skbinfo->frags + i;
415766e92fcfSHerbert Xu 		do {
415866e92fcfSHerbert Xu 			*--frag = *--frag2;
415966e92fcfSHerbert Xu 		} while (--i);
416066e92fcfSHerbert Xu 
4161b54c9d5bSJonathan Lemon 		skb_frag_off_add(frag, offset);
41629e903e08SEric Dumazet 		skb_frag_size_sub(frag, offset);
416366e92fcfSHerbert Xu 
4164715dc1f3SEric Dumazet 		/* all fragments' truesize: remove (head size + sk_buff) */
4165ec47ea82SAlexander Duyck 		delta_truesize = skb->truesize -
4166ec47ea82SAlexander Duyck 				 SKB_TRUESIZE(skb_end_offset(skb));
4167715dc1f3SEric Dumazet 
4168f5572068SHerbert Xu 		skb->truesize -= skb->data_len;
4169f5572068SHerbert Xu 		skb->len -= skb->data_len;
4170f5572068SHerbert Xu 		skb->data_len = 0;
4171f5572068SHerbert Xu 
4172715dc1f3SEric Dumazet 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
41735d38a079SHerbert Xu 		goto done;
4174d7e8883cSEric Dumazet 	} else if (skb->head_frag) {
4175d7e8883cSEric Dumazet 		int nr_frags = pinfo->nr_frags;
4176d7e8883cSEric Dumazet 		skb_frag_t *frag = pinfo->frags + nr_frags;
4177d7e8883cSEric Dumazet 		struct page *page = virt_to_head_page(skb->head);
4178d7e8883cSEric Dumazet 		unsigned int first_size = headlen - offset;
4179d7e8883cSEric Dumazet 		unsigned int first_offset;
4180d7e8883cSEric Dumazet 
4181d7e8883cSEric Dumazet 		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
41828a29111cSEric Dumazet 			goto merge;
4183d7e8883cSEric Dumazet 
4184d7e8883cSEric Dumazet 		first_offset = skb->data -
4185d7e8883cSEric Dumazet 			       (unsigned char *)page_address(page) +
4186d7e8883cSEric Dumazet 			       offset;
4187d7e8883cSEric Dumazet 
4188d7e8883cSEric Dumazet 		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
4189d7e8883cSEric Dumazet 
4190d8e18a51SMatthew Wilcox (Oracle) 		__skb_frag_set_page(frag, page);
4191b54c9d5bSJonathan Lemon 		skb_frag_off_set(frag, first_offset);
4192d7e8883cSEric Dumazet 		skb_frag_size_set(frag, first_size);
4193d7e8883cSEric Dumazet 
4194d7e8883cSEric Dumazet 		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
4195d7e8883cSEric Dumazet 		/* We don't need to clear skbinfo->nr_frags here */
4196d7e8883cSEric Dumazet 
4197715dc1f3SEric Dumazet 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4198d7e8883cSEric Dumazet 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
4199d7e8883cSEric Dumazet 		goto done;
42008a29111cSEric Dumazet 	}
420171d93b39SHerbert Xu 
420271d93b39SHerbert Xu merge:
4203715dc1f3SEric Dumazet 	delta_truesize = skb->truesize;
420467147ba9SHerbert Xu 	if (offset > headlen) {
4205d1dc7abfSMichal Schmidt 		unsigned int eat = offset - headlen;
4206d1dc7abfSMichal Schmidt 
4207b54c9d5bSJonathan Lemon 		skb_frag_off_add(&skbinfo->frags[0], eat);
42089e903e08SEric Dumazet 		skb_frag_size_sub(&skbinfo->frags[0], eat);
4209d1dc7abfSMichal Schmidt 		skb->data_len -= eat;
4210d1dc7abfSMichal Schmidt 		skb->len -= eat;
421167147ba9SHerbert Xu 		offset = headlen;
421256035022SHerbert Xu 	}
421356035022SHerbert Xu 
421467147ba9SHerbert Xu 	__skb_pull(skb, offset);
421556035022SHerbert Xu 
421629e98242SEric Dumazet 	if (NAPI_GRO_CB(p)->last == p)
42178a29111cSEric Dumazet 		skb_shinfo(p)->frag_list = skb;
42188a29111cSEric Dumazet 	else
4219c3c7c254SEric Dumazet 		NAPI_GRO_CB(p)->last->next = skb;
4220c3c7c254SEric Dumazet 	NAPI_GRO_CB(p)->last = skb;
4221f4a775d1SEric Dumazet 	__skb_header_release(skb);
42228a29111cSEric Dumazet 	lp = p;
422371d93b39SHerbert Xu 
42245d38a079SHerbert Xu done:
42255d38a079SHerbert Xu 	NAPI_GRO_CB(p)->count++;
422637fe4732SHerbert Xu 	p->data_len += len;
4227715dc1f3SEric Dumazet 	p->truesize += delta_truesize;
422837fe4732SHerbert Xu 	p->len += len;
42298a29111cSEric Dumazet 	if (lp != p) {
42308a29111cSEric Dumazet 		lp->data_len += len;
42318a29111cSEric Dumazet 		lp->truesize += delta_truesize;
42328a29111cSEric Dumazet 		lp->len += len;
42338a29111cSEric Dumazet 	}
423471d93b39SHerbert Xu 	NAPI_GRO_CB(skb)->same_flow = 1;
423571d93b39SHerbert Xu 	return 0;
423671d93b39SHerbert Xu }
423771d93b39SHerbert Xu 
4238df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
4239df5042f4SFlorian Westphal #define SKB_EXT_ALIGN_VALUE	8
4240df5042f4SFlorian Westphal #define SKB_EXT_CHUNKSIZEOF(x)	(ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
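/* Worked example of the macro above (illustrative values): with
 * SKB_EXT_ALIGN_VALUE == 8, a 20-byte extension is padded to 24 bytes and
 * SKB_EXT_CHUNKSIZEOF() evaluates to 3, i.e. sizes are kept in 8-byte chunks
 * so they fit the u8 offset/chunks fields of struct skb_ext (hence the
 * BUILD_BUG_ON(skb_ext_total_length() > 255) check in skb_extensions_init()).
 */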
4241df5042f4SFlorian Westphal 
4242df5042f4SFlorian Westphal static const u8 skb_ext_type_len[] = {
4243df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4244df5042f4SFlorian Westphal 	[SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4245df5042f4SFlorian Westphal #endif
42464165079bSFlorian Westphal #ifdef CONFIG_XFRM
42474165079bSFlorian Westphal 	[SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
42484165079bSFlorian Westphal #endif
424995a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
425095a7233cSPaul Blakey 	[TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
425195a7233cSPaul Blakey #endif
42523ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP)
42533ee17bc7SMat Martineau 	[SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
42543ee17bc7SMat Martineau #endif
4255df5042f4SFlorian Westphal };
4256df5042f4SFlorian Westphal 
4257df5042f4SFlorian Westphal static __always_inline unsigned int skb_ext_total_length(void)
4258df5042f4SFlorian Westphal {
4259df5042f4SFlorian Westphal 	return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
4260df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4261df5042f4SFlorian Westphal 		skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
4262df5042f4SFlorian Westphal #endif
42634165079bSFlorian Westphal #ifdef CONFIG_XFRM
42644165079bSFlorian Westphal 		skb_ext_type_len[SKB_EXT_SEC_PATH] +
42654165079bSFlorian Westphal #endif
426695a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
426795a7233cSPaul Blakey 		skb_ext_type_len[TC_SKB_EXT] +
426895a7233cSPaul Blakey #endif
42693ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP)
42703ee17bc7SMat Martineau 		skb_ext_type_len[SKB_EXT_MPTCP] +
42713ee17bc7SMat Martineau #endif
4272df5042f4SFlorian Westphal 		0;
4273df5042f4SFlorian Westphal }
4274df5042f4SFlorian Westphal 
4275df5042f4SFlorian Westphal static void skb_extensions_init(void)
4276df5042f4SFlorian Westphal {
4277df5042f4SFlorian Westphal 	BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4278df5042f4SFlorian Westphal 	BUILD_BUG_ON(skb_ext_total_length() > 255);
4279df5042f4SFlorian Westphal 
4280df5042f4SFlorian Westphal 	skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4281df5042f4SFlorian Westphal 					     SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4282df5042f4SFlorian Westphal 					     0,
4283df5042f4SFlorian Westphal 					     SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4284df5042f4SFlorian Westphal 					     NULL);
4285df5042f4SFlorian Westphal }
4286df5042f4SFlorian Westphal #else
4287df5042f4SFlorian Westphal static void skb_extensions_init(void) {}
4288df5042f4SFlorian Westphal #endif
4289df5042f4SFlorian Westphal 
42901da177e4SLinus Torvalds void __init skb_init(void)
42911da177e4SLinus Torvalds {
429279a8a642SKees Cook 	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
42931da177e4SLinus Torvalds 					      sizeof(struct sk_buff),
42941da177e4SLinus Torvalds 					      0,
4295e5d679f3SAlexey Dobriyan 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
429679a8a642SKees Cook 					      offsetof(struct sk_buff, cb),
429779a8a642SKees Cook 					      sizeof_field(struct sk_buff, cb),
429820c2df83SPaul Mundt 					      NULL);
4299d179cd12SDavid S. Miller 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4300d0bf4a9eSEric Dumazet 						sizeof(struct sk_buff_fclones),
4301d179cd12SDavid S. Miller 						0,
4302e5d679f3SAlexey Dobriyan 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
430320c2df83SPaul Mundt 						NULL);
4304df5042f4SFlorian Westphal 	skb_extensions_init();
43051da177e4SLinus Torvalds }
43061da177e4SLinus Torvalds 
430751c739d1SDavid S. Miller static int
430848a1df65SJason A. Donenfeld __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
430948a1df65SJason A. Donenfeld 	       unsigned int recursion_level)
4310716ea3a7SDavid Howells {
43111a028e50SDavid S. Miller 	int start = skb_headlen(skb);
43121a028e50SDavid S. Miller 	int i, copy = start - offset;
4313fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
4314716ea3a7SDavid Howells 	int elt = 0;
4315716ea3a7SDavid Howells 
431648a1df65SJason A. Donenfeld 	if (unlikely(recursion_level >= 24))
431748a1df65SJason A. Donenfeld 		return -EMSGSIZE;
431848a1df65SJason A. Donenfeld 
4319716ea3a7SDavid Howells 	if (copy > 0) {
4320716ea3a7SDavid Howells 		if (copy > len)
4321716ea3a7SDavid Howells 			copy = len;
4322642f1490SJens Axboe 		sg_set_buf(sg, skb->data + offset, copy);
4323716ea3a7SDavid Howells 		elt++;
4324716ea3a7SDavid Howells 		if ((len -= copy) == 0)
4325716ea3a7SDavid Howells 			return elt;
4326716ea3a7SDavid Howells 		offset += copy;
4327716ea3a7SDavid Howells 	}
4328716ea3a7SDavid Howells 
4329716ea3a7SDavid Howells 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
43301a028e50SDavid S. Miller 		int end;
4331716ea3a7SDavid Howells 
4332547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
43331a028e50SDavid S. Miller 
43349e903e08SEric Dumazet 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4335716ea3a7SDavid Howells 		if ((copy = end - offset) > 0) {
4336716ea3a7SDavid Howells 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
433748a1df65SJason A. Donenfeld 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
433848a1df65SJason A. Donenfeld 				return -EMSGSIZE;
4339716ea3a7SDavid Howells 
4340716ea3a7SDavid Howells 			if (copy > len)
4341716ea3a7SDavid Howells 				copy = len;
4342ea2ab693SIan Campbell 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4343b54c9d5bSJonathan Lemon 				    skb_frag_off(frag) + offset - start);
4344716ea3a7SDavid Howells 			elt++;
4345716ea3a7SDavid Howells 			if (!(len -= copy))
4346716ea3a7SDavid Howells 				return elt;
4347716ea3a7SDavid Howells 			offset += copy;
4348716ea3a7SDavid Howells 		}
43491a028e50SDavid S. Miller 		start = end;
4350716ea3a7SDavid Howells 	}
4351716ea3a7SDavid Howells 
4352fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
435348a1df65SJason A. Donenfeld 		int end, ret;
4354716ea3a7SDavid Howells 
4355547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
43561a028e50SDavid S. Miller 
4357fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
4358716ea3a7SDavid Howells 		if ((copy = end - offset) > 0) {
435948a1df65SJason A. Donenfeld 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
436048a1df65SJason A. Donenfeld 				return -EMSGSIZE;
436148a1df65SJason A. Donenfeld 
4362716ea3a7SDavid Howells 			if (copy > len)
4363716ea3a7SDavid Howells 				copy = len;
436448a1df65SJason A. Donenfeld 			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
436548a1df65SJason A. Donenfeld 					      copy, recursion_level + 1);
436648a1df65SJason A. Donenfeld 			if (unlikely(ret < 0))
436748a1df65SJason A. Donenfeld 				return ret;
436848a1df65SJason A. Donenfeld 			elt += ret;
4369716ea3a7SDavid Howells 			if ((len -= copy) == 0)
4370716ea3a7SDavid Howells 				return elt;
4371716ea3a7SDavid Howells 			offset += copy;
4372716ea3a7SDavid Howells 		}
43731a028e50SDavid S. Miller 		start = end;
4374716ea3a7SDavid Howells 	}
4375716ea3a7SDavid Howells 	BUG_ON(len);
4376716ea3a7SDavid Howells 	return elt;
4377716ea3a7SDavid Howells }
4378716ea3a7SDavid Howells 
437948a1df65SJason A. Donenfeld /**
438048a1df65SJason A. Donenfeld  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
438148a1df65SJason A. Donenfeld  *	@skb: Socket buffer containing the buffers to be mapped
438248a1df65SJason A. Donenfeld  *	@sg: The scatter-gather list to map into
438348a1df65SJason A. Donenfeld  *	@offset: The offset into the buffer's contents to start mapping
438448a1df65SJason A. Donenfeld  *	@len: Length of buffer space to be mapped
438548a1df65SJason A. Donenfeld  *
438648a1df65SJason A. Donenfeld  *	Fill the specified scatter-gather list with mappings/pointers into a
438748a1df65SJason A. Donenfeld  *	region of the buffer space attached to a socket buffer. Returns either
438848a1df65SJason A. Donenfeld  *	the number of scatterlist items used, or -EMSGSIZE if the contents
438948a1df65SJason A. Donenfeld  *	could not fit.
439048a1df65SJason A. Donenfeld  */
439148a1df65SJason A. Donenfeld int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
439248a1df65SJason A. Donenfeld {
439348a1df65SJason A. Donenfeld 	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
439448a1df65SJason A. Donenfeld 
439548a1df65SJason A. Donenfeld 	if (nsg <= 0)
439648a1df65SJason A. Donenfeld 		return nsg;
439748a1df65SJason A. Donenfeld 
439848a1df65SJason A. Donenfeld 	sg_mark_end(&sg[nsg - 1]);
439948a1df65SJason A. Donenfeld 
440048a1df65SJason A. Donenfeld 	return nsg;
440148a1df65SJason A. Donenfeld }
440248a1df65SJason A. Donenfeld EXPORT_SYMBOL_GPL(skb_to_sgvec);
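
/* Example usage (editor's sketch, not part of the original file): map the
 * first @len bytes of an skb into a stack scatterlist, e.g. for a crypto
 * or DMA consumer. The table size is an assumption that holds for an skb
 * with linear data plus page frags but no frag list.
 */
static int __maybe_unused skb_to_sgvec_example(struct sk_buff *skb, int len)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nsg = skb_to_sgvec(skb, sg, 0, len);
	if (nsg < 0)		/* -EMSGSIZE: table too small */
		return nsg;
	/* sg[0..nsg-1] now describe the data; the last entry is marked. */
	return nsg;
}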
440348a1df65SJason A. Donenfeld 
440425a91d8dSFan Du /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to
440525a91d8dSFan Du  * the given sglist without marking the sg entry containing the last skb
440625a91d8dSFan Du  * data as the end. So the caller can manipulate the sg list at will when
440725a91d8dSFan Du  * appending new data after the first call, without calling sg_unmark_end
440725a91d8dSFan Du  * to expand the sg list.
440825a91d8dSFan Du  *
440925a91d8dSFan Du  * Scenario to use skb_to_sgvec_nomark:
441025a91d8dSFan Du  * 1. sg_init_table
441125a91d8dSFan Du  * 2. skb_to_sgvec_nomark(payload1)
441225a91d8dSFan Du  * 3. skb_to_sgvec_nomark(payload2)
441325a91d8dSFan Du  *
441425a91d8dSFan Du  * This is equivalent to:
441525a91d8dSFan Du  * 1. sg_init_table
441625a91d8dSFan Du  * 2. skb_to_sgvec(payload1)
441725a91d8dSFan Du  * 3. sg_unmark_end
441825a91d8dSFan Du  * 4. skb_to_sgvec(payload2)
441925a91d8dSFan Du  *
442025a91d8dSFan Du  * When conditionally mapping multiple payloads, skb_to_sgvec_nomark
442125a91d8dSFan Du  * is preferable.
442225a91d8dSFan Du  */
442325a91d8dSFan Du int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
442425a91d8dSFan Du 			int offset, int len)
442525a91d8dSFan Du {
442648a1df65SJason A. Donenfeld 	return __skb_to_sgvec(skb, sg, offset, len, 0);
442725a91d8dSFan Du }
442825a91d8dSFan Du EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
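
/* Example usage (editor's sketch, not part of the original file): the
 * append-after-mapping pattern the comment above describes, here mapping
 * the whole skb and then adding a separate, hypothetical trailer buffer
 * (e.g. an ICV) before marking the end.
 */
static int __maybe_unused skb_to_sgvec_nomark_example(struct sk_buff *skb,
						      void *icv, int icv_len)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nsg = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;
	/* No end mark was set, so entries can be appended directly. */
	sg_set_buf(&sg[nsg], icv, icv_len);
	sg_mark_end(&sg[nsg]);
	return nsg + 1;
}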
442925a91d8dSFan Du 
4432716ea3a7SDavid Howells /**
4433716ea3a7SDavid Howells  *	skb_cow_data - Check that a socket buffer's data buffers are writable
4434716ea3a7SDavid Howells  *	@skb: The socket buffer to check.
4435716ea3a7SDavid Howells  *	@tailbits: Amount of trailing space to be added
4436716ea3a7SDavid Howells  *	@trailer: Returned pointer to the skb where the @tailbits space begins
4437716ea3a7SDavid Howells  *
4438716ea3a7SDavid Howells  *	Make sure that the data buffers attached to a socket buffer are
4439716ea3a7SDavid Howells  *	writable. If they are not, private copies are made of the data buffers
4440716ea3a7SDavid Howells  *	and the socket buffer is set to use these instead.
4441716ea3a7SDavid Howells  *
4442716ea3a7SDavid Howells  *	If @tailbits is given, make sure that there is space to write @tailbits
4443716ea3a7SDavid Howells  *	bytes of data beyond current end of socket buffer.  @trailer will be
4444716ea3a7SDavid Howells  *	set to point to the skb in which this space begins.
4445716ea3a7SDavid Howells  *
4446716ea3a7SDavid Howells  *	The number of scatterlist elements required to completely map the
4447716ea3a7SDavid Howells  *	COW'd and extended socket buffer will be returned.
4448716ea3a7SDavid Howells  */
4449716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4450716ea3a7SDavid Howells {
4451716ea3a7SDavid Howells 	int copyflag;
4452716ea3a7SDavid Howells 	int elt;
4453716ea3a7SDavid Howells 	struct sk_buff *skb1, **skb_p;
4454716ea3a7SDavid Howells 
4455716ea3a7SDavid Howells 	/* If the skb is cloned or its head is paged, reallocate the
4456716ea3a7SDavid Howells 	 * head, pulling out all the pages (pages are considered not
4457716ea3a7SDavid Howells 	 * writable at the moment, even if they are anonymous).
4458716ea3a7SDavid Howells 	 */
4459716ea3a7SDavid Howells 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4460c15fc199SMiaohe Lin 	    !__pskb_pull_tail(skb, __skb_pagelen(skb)))
4461716ea3a7SDavid Howells 		return -ENOMEM;
4462716ea3a7SDavid Howells 
4463716ea3a7SDavid Howells 	/* Easy case. Most packets will go this way. */
446421dc3301SDavid S. Miller 	if (!skb_has_frag_list(skb)) {
4465716ea3a7SDavid Howells 		/* A little trouble: not enough space for the trailer.
4466716ea3a7SDavid Howells 		 * This should not happen when the stack is tuned to generate
4467716ea3a7SDavid Howells 		 * good frames. OK, on a miss we reallocate and reserve even
4468716ea3a7SDavid Howells 		 * more space; 128 bytes is fair. */
4469716ea3a7SDavid Howells 
4470716ea3a7SDavid Howells 		if (skb_tailroom(skb) < tailbits &&
4471716ea3a7SDavid Howells 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4472716ea3a7SDavid Howells 			return -ENOMEM;
4473716ea3a7SDavid Howells 
4474716ea3a7SDavid Howells 		/* Voila! */
4475716ea3a7SDavid Howells 		*trailer = skb;
4476716ea3a7SDavid Howells 		return 1;
4477716ea3a7SDavid Howells 	}
4478716ea3a7SDavid Howells 
4479716ea3a7SDavid Howells 	/* Misery. We are in trouble; time to mince the fragments... */
4480716ea3a7SDavid Howells 
4481716ea3a7SDavid Howells 	elt = 1;
4482716ea3a7SDavid Howells 	skb_p = &skb_shinfo(skb)->frag_list;
4483716ea3a7SDavid Howells 	copyflag = 0;
4484716ea3a7SDavid Howells 
4485716ea3a7SDavid Howells 	while ((skb1 = *skb_p) != NULL) {
4486716ea3a7SDavid Howells 		int ntail = 0;
4487716ea3a7SDavid Howells 
4488716ea3a7SDavid Howells 		/* The fragment was partially pulled by someone;
4489716ea3a7SDavid Howells 		 * this can happen on input. Copy it and everything
4490716ea3a7SDavid Howells 		 * after it. */
4491716ea3a7SDavid Howells 
4492716ea3a7SDavid Howells 		if (skb_shared(skb1))
4493716ea3a7SDavid Howells 			copyflag = 1;
4494716ea3a7SDavid Howells 
4495716ea3a7SDavid Howells 		/* If the skb is the last, worry about trailer. */
4496716ea3a7SDavid Howells 
4497716ea3a7SDavid Howells 		if (skb1->next == NULL && tailbits) {
4498716ea3a7SDavid Howells 			if (skb_shinfo(skb1)->nr_frags ||
449921dc3301SDavid S. Miller 			    skb_has_frag_list(skb1) ||
4500716ea3a7SDavid Howells 			    skb_tailroom(skb1) < tailbits)
4501716ea3a7SDavid Howells 				ntail = tailbits + 128;
4502716ea3a7SDavid Howells 		}
4503716ea3a7SDavid Howells 
4504716ea3a7SDavid Howells 		if (copyflag ||
4505716ea3a7SDavid Howells 		    skb_cloned(skb1) ||
4506716ea3a7SDavid Howells 		    ntail ||
4507716ea3a7SDavid Howells 		    skb_shinfo(skb1)->nr_frags ||
450821dc3301SDavid S. Miller 		    skb_has_frag_list(skb1)) {
4509716ea3a7SDavid Howells 			struct sk_buff *skb2;
4510716ea3a7SDavid Howells 
4511716ea3a7SDavid Howells 			/* No way out: we must make a private copy. */
4512716ea3a7SDavid Howells 			if (ntail == 0)
4513716ea3a7SDavid Howells 				skb2 = skb_copy(skb1, GFP_ATOMIC);
4514716ea3a7SDavid Howells 			else
4515716ea3a7SDavid Howells 				skb2 = skb_copy_expand(skb1,
4516716ea3a7SDavid Howells 						       skb_headroom(skb1),
4517716ea3a7SDavid Howells 						       ntail,
4518716ea3a7SDavid Howells 						       GFP_ATOMIC);
4519716ea3a7SDavid Howells 			if (unlikely(skb2 == NULL))
4520716ea3a7SDavid Howells 				return -ENOMEM;
4521716ea3a7SDavid Howells 
4522716ea3a7SDavid Howells 			if (skb1->sk)
4523716ea3a7SDavid Howells 				skb_set_owner_w(skb2, skb1->sk);
4524716ea3a7SDavid Howells 
4525716ea3a7SDavid Howells 			/* Looking around. Are we still alive?
4526716ea3a7SDavid Howells 			 * OK, link the new skb and drop the old one. */
4527716ea3a7SDavid Howells 
4528716ea3a7SDavid Howells 			skb2->next = skb1->next;
4529716ea3a7SDavid Howells 			*skb_p = skb2;
4530716ea3a7SDavid Howells 			kfree_skb(skb1);
4531716ea3a7SDavid Howells 			skb1 = skb2;
4532716ea3a7SDavid Howells 		}
4533716ea3a7SDavid Howells 		elt++;
4534716ea3a7SDavid Howells 		*trailer = skb1;
4535716ea3a7SDavid Howells 		skb_p = &skb1->next;
4536716ea3a7SDavid Howells 	}
4537716ea3a7SDavid Howells 
4538716ea3a7SDavid Howells 	return elt;
4539716ea3a7SDavid Howells }
4540b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data);
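
/* Example usage (editor's sketch, not part of the original file): the
 * ESP-like pattern of making an skb writable, reserving @tfclen trailer
 * bytes, and mapping the result. Error paths are abbreviated and the
 * GFP_ATOMIC allocation is an assumption about the calling context.
 */
static int __maybe_unused skb_cow_data_example(struct sk_buff *skb, int tfclen)
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nfrags, ret;

	nfrags = skb_cow_data(skb, tfclen, &trailer);
	if (nfrags < 0)
		return nfrags;
	pskb_put(skb, trailer, tfclen);		/* commit the trailer bytes */

	sg = kmalloc_array(nfrags, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;
	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	kfree(sg);
	return ret < 0 ? ret : 0;
}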
4541716ea3a7SDavid Howells 
4542b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb)
4543b1faf566SEric Dumazet {
4544b1faf566SEric Dumazet 	struct sock *sk = skb->sk;
4545b1faf566SEric Dumazet 
4546b1faf566SEric Dumazet 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4547b1faf566SEric Dumazet }
4548b1faf566SEric Dumazet 
45498605330aSSoheil Hassas Yeganeh static void skb_set_err_queue(struct sk_buff *skb)
45508605330aSSoheil Hassas Yeganeh {
45518605330aSSoheil Hassas Yeganeh 	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
45528605330aSSoheil Hassas Yeganeh 	 * So, it is safe to (mis)use it to mark skbs on the error queue.
45538605330aSSoheil Hassas Yeganeh 	 */
45548605330aSSoheil Hassas Yeganeh 	skb->pkt_type = PACKET_OUTGOING;
45558605330aSSoheil Hassas Yeganeh 	BUILD_BUG_ON(PACKET_OUTGOING == 0);
45568605330aSSoheil Hassas Yeganeh }
45578605330aSSoheil Hassas Yeganeh 
4558b1faf566SEric Dumazet /*
4559b1faf566SEric Dumazet  * Note: We don't mem charge error packets (no sk_forward_alloc changes).
4560b1faf566SEric Dumazet  */
4561b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4562b1faf566SEric Dumazet {
4563b1faf566SEric Dumazet 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4564ebb3b78dSEric Dumazet 	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
4565b1faf566SEric Dumazet 		return -ENOMEM;
4566b1faf566SEric Dumazet 
4567b1faf566SEric Dumazet 	skb_orphan(skb);
4568b1faf566SEric Dumazet 	skb->sk = sk;
4569b1faf566SEric Dumazet 	skb->destructor = sock_rmem_free;
4570b1faf566SEric Dumazet 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
45718605330aSSoheil Hassas Yeganeh 	skb_set_err_queue(skb);
4572b1faf566SEric Dumazet 
4573abb57ea4SEric Dumazet 	/* before exiting rcu section, make sure dst is refcounted */
4574abb57ea4SEric Dumazet 	skb_dst_force(skb);
4575abb57ea4SEric Dumazet 
4576b1faf566SEric Dumazet 	skb_queue_tail(&sk->sk_error_queue, skb);
4577b1faf566SEric Dumazet 	if (!sock_flag(sk, SOCK_DEAD))
45786e5d58fdSVinicius Costa Gomes 		sk->sk_error_report(sk);
4579b1faf566SEric Dumazet 	return 0;
4580b1faf566SEric Dumazet }
4581b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb);
4582b1faf566SEric Dumazet 
458383a1a1a7SSoheil Hassas Yeganeh static bool is_icmp_err_skb(const struct sk_buff *skb)
458483a1a1a7SSoheil Hassas Yeganeh {
458583a1a1a7SSoheil Hassas Yeganeh 	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
458683a1a1a7SSoheil Hassas Yeganeh 		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
458783a1a1a7SSoheil Hassas Yeganeh }
458883a1a1a7SSoheil Hassas Yeganeh 
4589364a9e93SWillem de Bruijn struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4590364a9e93SWillem de Bruijn {
4591364a9e93SWillem de Bruijn 	struct sk_buff_head *q = &sk->sk_error_queue;
459283a1a1a7SSoheil Hassas Yeganeh 	struct sk_buff *skb, *skb_next = NULL;
459383a1a1a7SSoheil Hassas Yeganeh 	bool icmp_next = false;
4594997d5c3fSEric Dumazet 	unsigned long flags;
4595364a9e93SWillem de Bruijn 
4596997d5c3fSEric Dumazet 	spin_lock_irqsave(&q->lock, flags);
4597364a9e93SWillem de Bruijn 	skb = __skb_dequeue(q);
459838b25793SSoheil Hassas Yeganeh 	if (skb && (skb_next = skb_peek(q))) {
459983a1a1a7SSoheil Hassas Yeganeh 		icmp_next = is_icmp_err_skb(skb_next);
460038b25793SSoheil Hassas Yeganeh 		if (icmp_next)
4601985f7337SWillem de Bruijn 			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
460238b25793SSoheil Hassas Yeganeh 	}
4603997d5c3fSEric Dumazet 	spin_unlock_irqrestore(&q->lock, flags);
4604364a9e93SWillem de Bruijn 
460583a1a1a7SSoheil Hassas Yeganeh 	if (is_icmp_err_skb(skb) && !icmp_next)
460683a1a1a7SSoheil Hassas Yeganeh 		sk->sk_err = 0;
460783a1a1a7SSoheil Hassas Yeganeh 
460883a1a1a7SSoheil Hassas Yeganeh 	if (skb_next)
4609364a9e93SWillem de Bruijn 		sk->sk_error_report(sk);
4610364a9e93SWillem de Bruijn 
4611364a9e93SWillem de Bruijn 	return skb;
4612364a9e93SWillem de Bruijn }
4613364a9e93SWillem de Bruijn EXPORT_SYMBOL(sock_dequeue_err_skb);
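
/* Example usage (editor's sketch, not part of the original file): draining
 * the error queue the way a protocol's *_recv_error() path consumes it;
 * handling of the per-skb payload is elided.
 */
static void __maybe_unused sock_error_queue_drain_example(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = sock_dequeue_err_skb(sk)) != NULL) {
		struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

		pr_debug("err queue: origin %u errno %u\n",
			 serr->ee.ee_origin, serr->ee.ee_errno);
		kfree_skb(skb);
	}
}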
4614364a9e93SWillem de Bruijn 
4615cab41c47SAlexander Duyck /**
4616cab41c47SAlexander Duyck  * skb_clone_sk - create clone of skb, and take reference to socket
4617cab41c47SAlexander Duyck  * @skb: the skb to clone
4618cab41c47SAlexander Duyck  *
4619cab41c47SAlexander Duyck  * This function creates a clone of a buffer that holds a reference on
4620cab41c47SAlexander Duyck  * sk_refcnt.  Buffers created via this function are meant to be
4621cab41c47SAlexander Duyck  * returned using sock_queue_err_skb, or freed via kfree_skb.
4622cab41c47SAlexander Duyck  *
4623cab41c47SAlexander Duyck  * When passing buffers allocated with this function to sock_queue_err_skb
4624cab41c47SAlexander Duyck  * it is necessary to wrap the call with sock_hold/sock_put in order to
4625cab41c47SAlexander Duyck  * prevent the socket from being released prior to being enqueued on
4626cab41c47SAlexander Duyck  * the sk_error_queue.
4627cab41c47SAlexander Duyck  */
462862bccb8cSAlexander Duyck struct sk_buff *skb_clone_sk(struct sk_buff *skb)
462962bccb8cSAlexander Duyck {
463062bccb8cSAlexander Duyck 	struct sock *sk = skb->sk;
463162bccb8cSAlexander Duyck 	struct sk_buff *clone;
463262bccb8cSAlexander Duyck 
463341c6d650SReshetova, Elena 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
463462bccb8cSAlexander Duyck 		return NULL;
463562bccb8cSAlexander Duyck 
463662bccb8cSAlexander Duyck 	clone = skb_clone(skb, GFP_ATOMIC);
463762bccb8cSAlexander Duyck 	if (!clone) {
463862bccb8cSAlexander Duyck 		sock_put(sk);
463962bccb8cSAlexander Duyck 		return NULL;
464062bccb8cSAlexander Duyck 	}
464162bccb8cSAlexander Duyck 
464262bccb8cSAlexander Duyck 	clone->sk = sk;
464362bccb8cSAlexander Duyck 	clone->destructor = sock_efree;
464462bccb8cSAlexander Duyck 
464562bccb8cSAlexander Duyck 	return clone;
464662bccb8cSAlexander Duyck }
464762bccb8cSAlexander Duyck EXPORT_SYMBOL(skb_clone_sk);
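
/* Example usage (editor's sketch, not part of the original file): returning
 * a clone made by skb_clone_sk() to the error queue with the
 * sock_hold()/sock_put() bracket required by the comment above, since
 * sock_queue_err_skb() orphans the skb and drops its socket reference.
 */
static void __maybe_unused skb_clone_sk_complete_example(struct sk_buff *clone)
{
	struct sock *sk = clone->sk;

	sock_hold(sk);
	if (sock_queue_err_skb(sk, clone))
		kfree_skb(clone);
	sock_put(sk);
}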
464862bccb8cSAlexander Duyck 
464937846ef0SAlexander Duyck static void __skb_complete_tx_timestamp(struct sk_buff *skb,
465037846ef0SAlexander Duyck 					struct sock *sk,
46514ef1b286SSoheil Hassas Yeganeh 					int tstype,
46524ef1b286SSoheil Hassas Yeganeh 					bool opt_stats)
4653ac45f602SPatrick Ohly {
4654ac45f602SPatrick Ohly 	struct sock_exterr_skb *serr;
4655ac45f602SPatrick Ohly 	int err;
4656ac45f602SPatrick Ohly 
46574ef1b286SSoheil Hassas Yeganeh 	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
46584ef1b286SSoheil Hassas Yeganeh 
4659ac45f602SPatrick Ohly 	serr = SKB_EXT_ERR(skb);
4660ac45f602SPatrick Ohly 	memset(serr, 0, sizeof(*serr));
4661ac45f602SPatrick Ohly 	serr->ee.ee_errno = ENOMSG;
4662ac45f602SPatrick Ohly 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4663e7fd2885SWillem de Bruijn 	serr->ee.ee_info = tstype;
46644ef1b286SSoheil Hassas Yeganeh 	serr->opt_stats = opt_stats;
46651862d620SWillem de Bruijn 	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
46664ed2d765SWillem de Bruijn 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
466709c2d251SWillem de Bruijn 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
4668ac5cc977SWANG Cong 		if (sk->sk_protocol == IPPROTO_TCP &&
4669ac5cc977SWANG Cong 		    sk->sk_type == SOCK_STREAM)
46704ed2d765SWillem de Bruijn 			serr->ee.ee_data -= sk->sk_tskey;
46714ed2d765SWillem de Bruijn 	}
467229030374SEric Dumazet 
4673ac45f602SPatrick Ohly 	err = sock_queue_err_skb(sk, skb);
467429030374SEric Dumazet 
4675ac45f602SPatrick Ohly 	if (err)
4676ac45f602SPatrick Ohly 		kfree_skb(skb);
4677ac45f602SPatrick Ohly }
467837846ef0SAlexander Duyck 
4679b245be1fSWillem de Bruijn static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4680b245be1fSWillem de Bruijn {
4681b245be1fSWillem de Bruijn 	bool ret;
4682b245be1fSWillem de Bruijn 
4683b245be1fSWillem de Bruijn 	if (likely(sysctl_tstamp_allow_data || tsonly))
4684b245be1fSWillem de Bruijn 		return true;
4685b245be1fSWillem de Bruijn 
4686b245be1fSWillem de Bruijn 	read_lock_bh(&sk->sk_callback_lock);
4687b245be1fSWillem de Bruijn 	ret = sk->sk_socket && sk->sk_socket->file &&
4688b245be1fSWillem de Bruijn 	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4689b245be1fSWillem de Bruijn 	read_unlock_bh(&sk->sk_callback_lock);
4690b245be1fSWillem de Bruijn 	return ret;
4691b245be1fSWillem de Bruijn }
4692b245be1fSWillem de Bruijn 
469337846ef0SAlexander Duyck void skb_complete_tx_timestamp(struct sk_buff *skb,
469437846ef0SAlexander Duyck 			       struct skb_shared_hwtstamps *hwtstamps)
469537846ef0SAlexander Duyck {
469637846ef0SAlexander Duyck 	struct sock *sk = skb->sk;
469737846ef0SAlexander Duyck 
4698b245be1fSWillem de Bruijn 	if (!skb_may_tx_timestamp(sk, false))
469935b99dffSWillem de Bruijn 		goto err;
4700b245be1fSWillem de Bruijn 
47019ac25fc0SEric Dumazet 	/* Take a reference to prevent skb_orphan() from freeing the socket,
47029ac25fc0SEric Dumazet 	 * but only if the socket refcount is not zero.
47039ac25fc0SEric Dumazet 	 */
470441c6d650SReshetova, Elena 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
470537846ef0SAlexander Duyck 		*skb_hwtstamps(skb) = *hwtstamps;
47064ef1b286SSoheil Hassas Yeganeh 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
470737846ef0SAlexander Duyck 		sock_put(sk);
470835b99dffSWillem de Bruijn 		return;
470937846ef0SAlexander Duyck 	}
471035b99dffSWillem de Bruijn 
471135b99dffSWillem de Bruijn err:
471235b99dffSWillem de Bruijn 	kfree_skb(skb);
47139ac25fc0SEric Dumazet }
471437846ef0SAlexander Duyck EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
471537846ef0SAlexander Duyck 
471637846ef0SAlexander Duyck void __skb_tstamp_tx(struct sk_buff *orig_skb,
471737846ef0SAlexander Duyck 		     struct skb_shared_hwtstamps *hwtstamps,
471837846ef0SAlexander Duyck 		     struct sock *sk, int tstype)
471937846ef0SAlexander Duyck {
472037846ef0SAlexander Duyck 	struct sk_buff *skb;
47214ef1b286SSoheil Hassas Yeganeh 	bool tsonly, opt_stats = false;
472237846ef0SAlexander Duyck 
47233a8dd971SWillem de Bruijn 	if (!sk)
47243a8dd971SWillem de Bruijn 		return;
47253a8dd971SWillem de Bruijn 
4726b50a5c70SMiroslav Lichvar 	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4727b50a5c70SMiroslav Lichvar 	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4728b50a5c70SMiroslav Lichvar 		return;
4729b50a5c70SMiroslav Lichvar 
47303a8dd971SWillem de Bruijn 	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
47313a8dd971SWillem de Bruijn 	if (!skb_may_tx_timestamp(sk, tsonly))
473237846ef0SAlexander Duyck 		return;
473337846ef0SAlexander Duyck 
47341c885808SFrancis Yan 	if (tsonly) {
47351c885808SFrancis Yan #ifdef CONFIG_INET
47361c885808SFrancis Yan 		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
47371c885808SFrancis Yan 		    sk->sk_protocol == IPPROTO_TCP &&
47384ef1b286SSoheil Hassas Yeganeh 		    sk->sk_type == SOCK_STREAM) {
473948040793SYousuk Seung 			skb = tcp_get_timestamping_opt_stats(sk, orig_skb);
47404ef1b286SSoheil Hassas Yeganeh 			opt_stats = true;
47414ef1b286SSoheil Hassas Yeganeh 		} else
47421c885808SFrancis Yan #endif
47431c885808SFrancis Yan 			skb = alloc_skb(0, GFP_ATOMIC);
47441c885808SFrancis Yan 	} else {
474537846ef0SAlexander Duyck 		skb = skb_clone(orig_skb, GFP_ATOMIC);
47461c885808SFrancis Yan 	}
474737846ef0SAlexander Duyck 	if (!skb)
474837846ef0SAlexander Duyck 		return;
474937846ef0SAlexander Duyck 
475049ca0d8bSWillem de Bruijn 	if (tsonly) {
4751fff88030SWillem de Bruijn 		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4752fff88030SWillem de Bruijn 					     SKBTX_ANY_TSTAMP;
475349ca0d8bSWillem de Bruijn 		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
475449ca0d8bSWillem de Bruijn 	}
475549ca0d8bSWillem de Bruijn 
475649ca0d8bSWillem de Bruijn 	if (hwtstamps)
475749ca0d8bSWillem de Bruijn 		*skb_hwtstamps(skb) = *hwtstamps;
475849ca0d8bSWillem de Bruijn 	else
475949ca0d8bSWillem de Bruijn 		skb->tstamp = ktime_get_real();
476049ca0d8bSWillem de Bruijn 
47614ef1b286SSoheil Hassas Yeganeh 	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
476237846ef0SAlexander Duyck }
4763e7fd2885SWillem de Bruijn EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4764e7fd2885SWillem de Bruijn 
4765e7fd2885SWillem de Bruijn void skb_tstamp_tx(struct sk_buff *orig_skb,
4766e7fd2885SWillem de Bruijn 		   struct skb_shared_hwtstamps *hwtstamps)
4767e7fd2885SWillem de Bruijn {
4768e7fd2885SWillem de Bruijn 	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4769e7fd2885SWillem de Bruijn 			       SCM_TSTAMP_SND);
4770e7fd2885SWillem de Bruijn }
4771ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx);
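
/* Example usage (editor's sketch, not part of the original file): how a
 * driver's TX completion handler might report a hardware timestamp; the
 * nanosecond counter @ns is a hypothetical value read from the NIC.
 */
static void __maybe_unused skb_tstamp_tx_example(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps hwts = {};

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		hwts.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &hwts);
	}
	dev_kfree_skb_any(skb);
}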
4772ac45f602SPatrick Ohly 
47736e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
47746e3e939fSJohannes Berg {
47756e3e939fSJohannes Berg 	struct sock *sk = skb->sk;
47766e3e939fSJohannes Berg 	struct sock_exterr_skb *serr;
4777dd4f1072SEric Dumazet 	int err = 1;
47786e3e939fSJohannes Berg 
47796e3e939fSJohannes Berg 	skb->wifi_acked_valid = 1;
47806e3e939fSJohannes Berg 	skb->wifi_acked = acked;
47816e3e939fSJohannes Berg 
47826e3e939fSJohannes Berg 	serr = SKB_EXT_ERR(skb);
47836e3e939fSJohannes Berg 	memset(serr, 0, sizeof(*serr));
47846e3e939fSJohannes Berg 	serr->ee.ee_errno = ENOMSG;
47856e3e939fSJohannes Berg 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
47866e3e939fSJohannes Berg 
4787dd4f1072SEric Dumazet 	/* Take a reference to prevent skb_orphan() from freeing the socket,
4788dd4f1072SEric Dumazet 	 * but only if the socket refcount is not zero.
4789dd4f1072SEric Dumazet 	 */
479041c6d650SReshetova, Elena 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
47916e3e939fSJohannes Berg 		err = sock_queue_err_skb(sk, skb);
4792dd4f1072SEric Dumazet 		sock_put(sk);
4793dd4f1072SEric Dumazet 	}
47946e3e939fSJohannes Berg 	if (err)
47956e3e939fSJohannes Berg 		kfree_skb(skb);
47966e3e939fSJohannes Berg }
47976e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
47986e3e939fSJohannes Berg 
4799f35d9d8aSRusty Russell /**
4800f35d9d8aSRusty Russell  * skb_partial_csum_set - set up and verify partial csum values for packet
4801f35d9d8aSRusty Russell  * @skb: the skb to set
4802f35d9d8aSRusty Russell  * @start: the number of bytes after skb->data to start checksumming.
4803f35d9d8aSRusty Russell  * @off: the offset from start to place the checksum.
4804f35d9d8aSRusty Russell  *
4805f35d9d8aSRusty Russell  * For untrusted partially-checksummed packets, we need to make sure the values
4806f35d9d8aSRusty Russell  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4807f35d9d8aSRusty Russell  *
4808f35d9d8aSRusty Russell  * This function checks and sets those values and skb->ip_summed: if this
4809f35d9d8aSRusty Russell  * returns false you should drop the packet.
4810f35d9d8aSRusty Russell  */
4811f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4812f35d9d8aSRusty Russell {
481352b5d6f5SEric Dumazet 	u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
481452b5d6f5SEric Dumazet 	u32 csum_start = skb_headroom(skb) + (u32)start;
481552b5d6f5SEric Dumazet 
481652b5d6f5SEric Dumazet 	if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
481752b5d6f5SEric Dumazet 		net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
481852b5d6f5SEric Dumazet 				     start, off, skb_headroom(skb), skb_headlen(skb));
4819f35d9d8aSRusty Russell 		return false;
4820f35d9d8aSRusty Russell 	}
4821f35d9d8aSRusty Russell 	skb->ip_summed = CHECKSUM_PARTIAL;
482252b5d6f5SEric Dumazet 	skb->csum_start = csum_start;
4823f35d9d8aSRusty Russell 	skb->csum_offset = off;
4824e5d5decaSJason Wang 	skb_set_transport_header(skb, start);
4825f35d9d8aSRusty Russell 	return true;
4826f35d9d8aSRusty Russell }
4827b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set);
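
/* Example usage (editor's sketch, not part of the original file): applying
 * checksum metadata taken from an untrusted source, e.g. a guest-supplied
 * virtio-style header; @start and @off are hypothetical inputs.
 */
static int __maybe_unused skb_partial_csum_set_example(struct sk_buff *skb,
						       u16 start, u16 off)
{
	if (!skb_partial_csum_set(skb, start, off))
		return -EINVAL;		/* caller should drop the packet */
	/* skb->ip_summed is now CHECKSUM_PARTIAL; the device fills in the
	 * checksum at csum_start + csum_offset on transmit.
	 */
	return 0;
}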
4828f35d9d8aSRusty Russell 
4829ed1f50c3SPaul Durrant static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4830ed1f50c3SPaul Durrant 			       unsigned int max)
4831ed1f50c3SPaul Durrant {
4832ed1f50c3SPaul Durrant 	if (skb_headlen(skb) >= len)
4833ed1f50c3SPaul Durrant 		return 0;
4834ed1f50c3SPaul Durrant 
4835ed1f50c3SPaul Durrant 	/* If we need to pull up, then pull up to the max, so we
4836ed1f50c3SPaul Durrant 	 * won't need to do it again.
4837ed1f50c3SPaul Durrant 	 */
4838ed1f50c3SPaul Durrant 	if (max > skb->len)
4839ed1f50c3SPaul Durrant 		max = skb->len;
4840ed1f50c3SPaul Durrant 
4841ed1f50c3SPaul Durrant 	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4842ed1f50c3SPaul Durrant 		return -ENOMEM;
4843ed1f50c3SPaul Durrant 
4844ed1f50c3SPaul Durrant 	if (skb_headlen(skb) < len)
4845ed1f50c3SPaul Durrant 		return -EPROTO;
4846ed1f50c3SPaul Durrant 
4847ed1f50c3SPaul Durrant 	return 0;
4848ed1f50c3SPaul Durrant }
4849ed1f50c3SPaul Durrant 
4850f9708b43SJan Beulich #define MAX_TCP_HDR_LEN (15 * 4)
4851f9708b43SJan Beulich 
4852f9708b43SJan Beulich static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4853f9708b43SJan Beulich 				      typeof(IPPROTO_IP) proto,
4854f9708b43SJan Beulich 				      unsigned int off)
4855f9708b43SJan Beulich {
4856f9708b43SJan Beulich 	int err;
4857f9708b43SJan Beulich 
4858161d1792SKees Cook 	switch (proto) {
4859f9708b43SJan Beulich 	case IPPROTO_TCP:
4860f9708b43SJan Beulich 		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4861f9708b43SJan Beulich 					  off + MAX_TCP_HDR_LEN);
4862f9708b43SJan Beulich 		if (!err && !skb_partial_csum_set(skb, off,
4863f9708b43SJan Beulich 						  offsetof(struct tcphdr,
4864f9708b43SJan Beulich 							   check)))
4865f9708b43SJan Beulich 			err = -EPROTO;
4866f9708b43SJan Beulich 		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4867f9708b43SJan Beulich 
4868f9708b43SJan Beulich 	case IPPROTO_UDP:
4869f9708b43SJan Beulich 		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4870f9708b43SJan Beulich 					  off + sizeof(struct udphdr));
4871f9708b43SJan Beulich 		if (!err && !skb_partial_csum_set(skb, off,
4872f9708b43SJan Beulich 						  offsetof(struct udphdr,
4873f9708b43SJan Beulich 							   check)))
4874f9708b43SJan Beulich 			err = -EPROTO;
4875f9708b43SJan Beulich 		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4876f9708b43SJan Beulich 	}
4877f9708b43SJan Beulich 
4878f9708b43SJan Beulich 	return ERR_PTR(-EPROTO);
4879f9708b43SJan Beulich }
4880f9708b43SJan Beulich 
4881ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus
4882ed1f50c3SPaul Durrant  * maximally sized IP and TCP or UDP headers.
4883ed1f50c3SPaul Durrant  */
4884ed1f50c3SPaul Durrant #define MAX_IP_HDR_LEN 128
4885ed1f50c3SPaul Durrant 
4886f9708b43SJan Beulich static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4887ed1f50c3SPaul Durrant {
4888ed1f50c3SPaul Durrant 	unsigned int off;
4889ed1f50c3SPaul Durrant 	bool fragment;
4890f9708b43SJan Beulich 	__sum16 *csum;
4891ed1f50c3SPaul Durrant 	int err;
4892ed1f50c3SPaul Durrant 
4893ed1f50c3SPaul Durrant 	fragment = false;
4894ed1f50c3SPaul Durrant 
4895ed1f50c3SPaul Durrant 	err = skb_maybe_pull_tail(skb,
4896ed1f50c3SPaul Durrant 				  sizeof(struct iphdr),
4897ed1f50c3SPaul Durrant 				  MAX_IP_HDR_LEN);
4898ed1f50c3SPaul Durrant 	if (err < 0)
4899ed1f50c3SPaul Durrant 		goto out;
4900ed1f50c3SPaul Durrant 
490111f920d2SMiaohe Lin 	if (ip_is_fragment(ip_hdr(skb)))
4902ed1f50c3SPaul Durrant 		fragment = true;
4903ed1f50c3SPaul Durrant 
4904ed1f50c3SPaul Durrant 	off = ip_hdrlen(skb);
4905ed1f50c3SPaul Durrant 
4906ed1f50c3SPaul Durrant 	err = -EPROTO;
4907ed1f50c3SPaul Durrant 
4908ed1f50c3SPaul Durrant 	if (fragment)
4909ed1f50c3SPaul Durrant 		goto out;
4910ed1f50c3SPaul Durrant 
4911f9708b43SJan Beulich 	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4912f9708b43SJan Beulich 	if (IS_ERR(csum))
4913f9708b43SJan Beulich 		return PTR_ERR(csum);
4914ed1f50c3SPaul Durrant 
4915ed1f50c3SPaul Durrant 	if (recalculate)
4916f9708b43SJan Beulich 		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4917ed1f50c3SPaul Durrant 					   ip_hdr(skb)->daddr,
4918ed1f50c3SPaul Durrant 					   skb->len - off,
4919f9708b43SJan Beulich 					   ip_hdr(skb)->protocol, 0);
4920ed1f50c3SPaul Durrant 	err = 0;
4921ed1f50c3SPaul Durrant 
4922ed1f50c3SPaul Durrant out:
4923ed1f50c3SPaul Durrant 	return err;
4924ed1f50c3SPaul Durrant }
4925ed1f50c3SPaul Durrant 
4926ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus
4927ed1f50c3SPaul Durrant  * an IPv6 header, all options, and a maximal TCP or UDP header.
4928ed1f50c3SPaul Durrant  */
4929ed1f50c3SPaul Durrant #define MAX_IPV6_HDR_LEN 256
4930ed1f50c3SPaul Durrant 
4931ed1f50c3SPaul Durrant #define OPT_HDR(type, skb, off) \
4932ed1f50c3SPaul Durrant 	(type *)(skb_network_header(skb) + (off))
4933ed1f50c3SPaul Durrant 
4934ed1f50c3SPaul Durrant static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4935ed1f50c3SPaul Durrant {
4936ed1f50c3SPaul Durrant 	int err;
4937ed1f50c3SPaul Durrant 	u8 nexthdr;
4938ed1f50c3SPaul Durrant 	unsigned int off;
4939ed1f50c3SPaul Durrant 	unsigned int len;
4940ed1f50c3SPaul Durrant 	bool fragment;
4941ed1f50c3SPaul Durrant 	bool done;
4942f9708b43SJan Beulich 	__sum16 *csum;
4943ed1f50c3SPaul Durrant 
4944ed1f50c3SPaul Durrant 	fragment = false;
4945ed1f50c3SPaul Durrant 	done = false;
4946ed1f50c3SPaul Durrant 
4947ed1f50c3SPaul Durrant 	off = sizeof(struct ipv6hdr);
4948ed1f50c3SPaul Durrant 
4949ed1f50c3SPaul Durrant 	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4950ed1f50c3SPaul Durrant 	if (err < 0)
4951ed1f50c3SPaul Durrant 		goto out;
4952ed1f50c3SPaul Durrant 
4953ed1f50c3SPaul Durrant 	nexthdr = ipv6_hdr(skb)->nexthdr;
4954ed1f50c3SPaul Durrant 
4955ed1f50c3SPaul Durrant 	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4956ed1f50c3SPaul Durrant 	while (off <= len && !done) {
4957ed1f50c3SPaul Durrant 		switch (nexthdr) {
4958ed1f50c3SPaul Durrant 		case IPPROTO_DSTOPTS:
4959ed1f50c3SPaul Durrant 		case IPPROTO_HOPOPTS:
4960ed1f50c3SPaul Durrant 		case IPPROTO_ROUTING: {
4961ed1f50c3SPaul Durrant 			struct ipv6_opt_hdr *hp;
4962ed1f50c3SPaul Durrant 
4963ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
4964ed1f50c3SPaul Durrant 						  off +
4965ed1f50c3SPaul Durrant 						  sizeof(struct ipv6_opt_hdr),
4966ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
4967ed1f50c3SPaul Durrant 			if (err < 0)
4968ed1f50c3SPaul Durrant 				goto out;
4969ed1f50c3SPaul Durrant 
4970ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4971ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
4972ed1f50c3SPaul Durrant 			off += ipv6_optlen(hp);
4973ed1f50c3SPaul Durrant 			break;
4974ed1f50c3SPaul Durrant 		}
4975ed1f50c3SPaul Durrant 		case IPPROTO_AH: {
4976ed1f50c3SPaul Durrant 			struct ip_auth_hdr *hp;
4977ed1f50c3SPaul Durrant 
4978ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
4979ed1f50c3SPaul Durrant 						  off +
4980ed1f50c3SPaul Durrant 						  sizeof(struct ip_auth_hdr),
4981ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
4982ed1f50c3SPaul Durrant 			if (err < 0)
4983ed1f50c3SPaul Durrant 				goto out;
4984ed1f50c3SPaul Durrant 
4985ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4986ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
4987ed1f50c3SPaul Durrant 			off += ipv6_authlen(hp);
4988ed1f50c3SPaul Durrant 			break;
4989ed1f50c3SPaul Durrant 		}
4990ed1f50c3SPaul Durrant 		case IPPROTO_FRAGMENT: {
4991ed1f50c3SPaul Durrant 			struct frag_hdr *hp;
4992ed1f50c3SPaul Durrant 
4993ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
4994ed1f50c3SPaul Durrant 						  off +
4995ed1f50c3SPaul Durrant 						  sizeof(struct frag_hdr),
4996ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
4997ed1f50c3SPaul Durrant 			if (err < 0)
4998ed1f50c3SPaul Durrant 				goto out;
4999ed1f50c3SPaul Durrant 
5000ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct frag_hdr, skb, off);
5001ed1f50c3SPaul Durrant 
5002ed1f50c3SPaul Durrant 			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
5003ed1f50c3SPaul Durrant 				fragment = true;
5004ed1f50c3SPaul Durrant 
5005ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
5006ed1f50c3SPaul Durrant 			off += sizeof(struct frag_hdr);
5007ed1f50c3SPaul Durrant 			break;
5008ed1f50c3SPaul Durrant 		}
5009ed1f50c3SPaul Durrant 		default:
5010ed1f50c3SPaul Durrant 			done = true;
5011ed1f50c3SPaul Durrant 			break;
5012ed1f50c3SPaul Durrant 		}
5013ed1f50c3SPaul Durrant 	}
5014ed1f50c3SPaul Durrant 
5015ed1f50c3SPaul Durrant 	err = -EPROTO;
5016ed1f50c3SPaul Durrant 
5017ed1f50c3SPaul Durrant 	if (!done || fragment)
5018ed1f50c3SPaul Durrant 		goto out;
5019ed1f50c3SPaul Durrant 
5020f9708b43SJan Beulich 	csum = skb_checksum_setup_ip(skb, nexthdr, off);
5021f9708b43SJan Beulich 	if (IS_ERR(csum))
5022f9708b43SJan Beulich 		return PTR_ERR(csum);
5023ed1f50c3SPaul Durrant 
5024ed1f50c3SPaul Durrant 	if (recalculate)
5025f9708b43SJan Beulich 		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5026ed1f50c3SPaul Durrant 					 &ipv6_hdr(skb)->daddr,
5027f9708b43SJan Beulich 					 skb->len - off, nexthdr, 0);
5028ed1f50c3SPaul Durrant 	err = 0;
5029ed1f50c3SPaul Durrant 
5030ed1f50c3SPaul Durrant out:
5031ed1f50c3SPaul Durrant 	return err;
5032ed1f50c3SPaul Durrant }
5033ed1f50c3SPaul Durrant 
5034ed1f50c3SPaul Durrant /**
5035ed1f50c3SPaul Durrant  * skb_checksum_setup - set up partial checksum offset
5036ed1f50c3SPaul Durrant  * @skb: the skb to set up
5037ed1f50c3SPaul Durrant  * @recalculate: if true the pseudo-header checksum will be recalculated
5038ed1f50c3SPaul Durrant  */
5039ed1f50c3SPaul Durrant int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
5040ed1f50c3SPaul Durrant {
5041ed1f50c3SPaul Durrant 	int err;
5042ed1f50c3SPaul Durrant 
5043ed1f50c3SPaul Durrant 	switch (skb->protocol) {
5044ed1f50c3SPaul Durrant 	case htons(ETH_P_IP):
5045f9708b43SJan Beulich 		err = skb_checksum_setup_ipv4(skb, recalculate);
5046ed1f50c3SPaul Durrant 		break;
5047ed1f50c3SPaul Durrant 
5048ed1f50c3SPaul Durrant 	case htons(ETH_P_IPV6):
5049ed1f50c3SPaul Durrant 		err = skb_checksum_setup_ipv6(skb, recalculate);
5050ed1f50c3SPaul Durrant 		break;
5051ed1f50c3SPaul Durrant 
5052ed1f50c3SPaul Durrant 	default:
5053ed1f50c3SPaul Durrant 		err = -EPROTO;
5054ed1f50c3SPaul Durrant 		break;
5055ed1f50c3SPaul Durrant 	}
5056ed1f50c3SPaul Durrant 
5057ed1f50c3SPaul Durrant 	return err;
5058ed1f50c3SPaul Durrant }
5059ed1f50c3SPaul Durrant EXPORT_SYMBOL(skb_checksum_setup);
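
/* Example usage (editor's sketch, not part of the original file): a receive
 * path for packets from an untrusted backend fixing up partial checksums
 * before handing the skb to the stack.
 */
static int __maybe_unused skb_checksum_setup_example(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int err = skb_checksum_setup(skb, true);

		if (err) {
			kfree_skb(skb);
			return err;
		}
	}
	return 0;
}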
5060ed1f50c3SPaul Durrant 
50619afd85c9SLinus Lüssing /**
50629afd85c9SLinus Lüssing  * skb_checksum_maybe_trim - maybe trims the given skb
50639afd85c9SLinus Lüssing  * @skb: the skb to check
50649afd85c9SLinus Lüssing  * @transport_len: the data length beyond the network header
50659afd85c9SLinus Lüssing  *
50669afd85c9SLinus Lüssing  * Checks whether the given skb has data beyond the given transport length.
50679afd85c9SLinus Lüssing  * If so, returns a cloned skb trimmed to this transport length.
50689afd85c9SLinus Lüssing  * Otherwise returns the provided skb. Returns NULL in error cases
50699afd85c9SLinus Lüssing  * (e.g. transport_len exceeds skb length or out-of-memory).
50709afd85c9SLinus Lüssing  *
5071a516993fSLinus Lüssing  * Caller needs to set the skb transport header and free any returned skb if it
5072a516993fSLinus Lüssing  * differs from the provided skb.
50739afd85c9SLinus Lüssing  */
50749afd85c9SLinus Lüssing static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
50759afd85c9SLinus Lüssing 					       unsigned int transport_len)
50769afd85c9SLinus Lüssing {
50779afd85c9SLinus Lüssing 	struct sk_buff *skb_chk;
50789afd85c9SLinus Lüssing 	unsigned int len = skb_transport_offset(skb) + transport_len;
50799afd85c9SLinus Lüssing 	int ret;
50809afd85c9SLinus Lüssing 
5081a516993fSLinus Lüssing 	if (skb->len < len)
50829afd85c9SLinus Lüssing 		return NULL;
5083a516993fSLinus Lüssing 	else if (skb->len == len)
50849afd85c9SLinus Lüssing 		return skb;
50859afd85c9SLinus Lüssing 
50869afd85c9SLinus Lüssing 	skb_chk = skb_clone(skb, GFP_ATOMIC);
50879afd85c9SLinus Lüssing 	if (!skb_chk)
50889afd85c9SLinus Lüssing 		return NULL;
50899afd85c9SLinus Lüssing 
50909afd85c9SLinus Lüssing 	ret = pskb_trim_rcsum(skb_chk, len);
50919afd85c9SLinus Lüssing 	if (ret) {
50929afd85c9SLinus Lüssing 		kfree_skb(skb_chk);
50939afd85c9SLinus Lüssing 		return NULL;
50949afd85c9SLinus Lüssing 	}
50959afd85c9SLinus Lüssing 
50969afd85c9SLinus Lüssing 	return skb_chk;
50979afd85c9SLinus Lüssing }
50989afd85c9SLinus Lüssing 
50999afd85c9SLinus Lüssing /**
51009afd85c9SLinus Lüssing  * skb_checksum_trimmed - validate checksum of an skb
51019afd85c9SLinus Lüssing  * @skb: the skb to check
51029afd85c9SLinus Lüssing  * @transport_len: the data length beyond the network header
51039afd85c9SLinus Lüssing  * @skb_chkf: checksum function to use
51049afd85c9SLinus Lüssing  *
51059afd85c9SLinus Lüssing  * Applies the given checksum function skb_chkf to the provided skb.
51069afd85c9SLinus Lüssing  * Returns a checked and maybe trimmed skb. Returns NULL on error.
51079afd85c9SLinus Lüssing  *
51089afd85c9SLinus Lüssing  * If the skb has data beyond the given transport length, then a
51099afd85c9SLinus Lüssing  * trimmed & cloned skb is checked and returned.
51109afd85c9SLinus Lüssing  *
5111a516993fSLinus Lüssing  * Caller needs to set the skb transport header and free any returned skb if it
5112a516993fSLinus Lüssing  * differs from the provided skb.
51139afd85c9SLinus Lüssing  */
51149afd85c9SLinus Lüssing struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
51159afd85c9SLinus Lüssing 				     unsigned int transport_len,
51169afd85c9SLinus Lüssing 				     __sum16(*skb_chkf)(struct sk_buff *skb))
51179afd85c9SLinus Lüssing {
51189afd85c9SLinus Lüssing 	struct sk_buff *skb_chk;
51199afd85c9SLinus Lüssing 	unsigned int offset = skb_transport_offset(skb);
5120fcba67c9SLinus Lüssing 	__sum16 ret;
51219afd85c9SLinus Lüssing 
51229afd85c9SLinus Lüssing 	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
51239afd85c9SLinus Lüssing 	if (!skb_chk)
5124a516993fSLinus Lüssing 		goto err;
51259afd85c9SLinus Lüssing 
5126a516993fSLinus Lüssing 	if (!pskb_may_pull(skb_chk, offset))
5127a516993fSLinus Lüssing 		goto err;
51289afd85c9SLinus Lüssing 
51299b368814SLinus Lüssing 	skb_pull_rcsum(skb_chk, offset);
51309afd85c9SLinus Lüssing 	ret = skb_chkf(skb_chk);
51319b368814SLinus Lüssing 	skb_push_rcsum(skb_chk, offset);
51329afd85c9SLinus Lüssing 
5133a516993fSLinus Lüssing 	if (ret)
5134a516993fSLinus Lüssing 		goto err;
51359afd85c9SLinus Lüssing 
51369afd85c9SLinus Lüssing 	return skb_chk;
5137a516993fSLinus Lüssing 
5138a516993fSLinus Lüssing err:
5139a516993fSLinus Lüssing 	if (skb_chk && skb_chk != skb)
5140a516993fSLinus Lüssing 		kfree_skb(skb_chk);
5141a516993fSLinus Lüssing 
5142a516993fSLinus Lüssing 	return NULL;
51449afd85c9SLinus Lüssing }
51459afd85c9SLinus Lüssing EXPORT_SYMBOL(skb_checksum_trimmed);
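
/* Example usage (editor's sketch, not part of the original file): validating
 * an IGMP-style message, mirroring how the multicast code uses this helper.
 * Assumes the transport header is already set, as required above.
 */
static __sum16 skb_trimmed_example_chkf(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}

static struct sk_buff *__maybe_unused
skb_checksum_trimmed_example(struct sk_buff *skb, unsigned int transport_len)
{
	struct sk_buff *skb_chk;

	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       skb_trimmed_example_chkf);
	/* NULL means a bad checksum or a resource failure; otherwise the
	 * result may be a trimmed clone the caller must free separately.
	 */
	return skb_chk;
}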
51469afd85c9SLinus Lüssing 
51474497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb)
51484497b076SBen Hutchings {
5149e87cc472SJoe Perches 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5150e87cc472SJoe Perches 			     skb->dev->name);
51514497b076SBen Hutchings }
51524497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding);
5153bad43ca8SEric Dumazet 
5154bad43ca8SEric Dumazet void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5155bad43ca8SEric Dumazet {
51563d861f66SEric Dumazet 	if (head_stolen) {
51573d861f66SEric Dumazet 		skb_release_head_state(skb);
5158bad43ca8SEric Dumazet 		kmem_cache_free(skbuff_head_cache, skb);
51593d861f66SEric Dumazet 	} else {
5160bad43ca8SEric Dumazet 		__kfree_skb(skb);
5161bad43ca8SEric Dumazet 	}
51623d861f66SEric Dumazet }
5163bad43ca8SEric Dumazet EXPORT_SYMBOL(kfree_skb_partial);
5164bad43ca8SEric Dumazet 
5165bad43ca8SEric Dumazet /**
5166bad43ca8SEric Dumazet  * skb_try_coalesce - try to merge skb to prior one
5167bad43ca8SEric Dumazet  * @to: prior buffer
5168bad43ca8SEric Dumazet  * @from: buffer to add
5169bad43ca8SEric Dumazet  * @fragstolen: pointer to boolean
5170c6c4b97cSRandy Dunlap  * @delta_truesize: how much more was allocated than was requested
5171bad43ca8SEric Dumazet  */
5172bad43ca8SEric Dumazet bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5173bad43ca8SEric Dumazet 		      bool *fragstolen, int *delta_truesize)
5174bad43ca8SEric Dumazet {
5175c818fa9eSEric Dumazet 	struct skb_shared_info *to_shinfo, *from_shinfo;
5176bad43ca8SEric Dumazet 	int i, delta, len = from->len;
5177bad43ca8SEric Dumazet 
5178bad43ca8SEric Dumazet 	*fragstolen = false;
5179bad43ca8SEric Dumazet 
5180bad43ca8SEric Dumazet 	if (skb_cloned(to))
5181bad43ca8SEric Dumazet 		return false;
5182bad43ca8SEric Dumazet 
5183bad43ca8SEric Dumazet 	if (len <= skb_tailroom(to)) {
5184e93a0435SEric Dumazet 		if (len)
5185bad43ca8SEric Dumazet 			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5186bad43ca8SEric Dumazet 		*delta_truesize = 0;
5187bad43ca8SEric Dumazet 		return true;
5188bad43ca8SEric Dumazet 	}
5189bad43ca8SEric Dumazet 
5190c818fa9eSEric Dumazet 	to_shinfo = skb_shinfo(to);
5191c818fa9eSEric Dumazet 	from_shinfo = skb_shinfo(from);
5192c818fa9eSEric Dumazet 	if (to_shinfo->frag_list || from_shinfo->frag_list)
5193bad43ca8SEric Dumazet 		return false;
51941f8b977aSWillem de Bruijn 	if (skb_zcopy(to) || skb_zcopy(from))
51951f8b977aSWillem de Bruijn 		return false;
5196bad43ca8SEric Dumazet 
5197bad43ca8SEric Dumazet 	if (skb_headlen(from) != 0) {
5198bad43ca8SEric Dumazet 		struct page *page;
5199bad43ca8SEric Dumazet 		unsigned int offset;
5200bad43ca8SEric Dumazet 
5201c818fa9eSEric Dumazet 		if (to_shinfo->nr_frags +
5202c818fa9eSEric Dumazet 		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5203bad43ca8SEric Dumazet 			return false;
5204bad43ca8SEric Dumazet 
5205bad43ca8SEric Dumazet 		if (skb_head_is_locked(from))
5206bad43ca8SEric Dumazet 			return false;
5207bad43ca8SEric Dumazet 
5208bad43ca8SEric Dumazet 		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5209bad43ca8SEric Dumazet 
5210bad43ca8SEric Dumazet 		page = virt_to_head_page(from->head);
5211bad43ca8SEric Dumazet 		offset = from->data - (unsigned char *)page_address(page);
5212bad43ca8SEric Dumazet 
5213c818fa9eSEric Dumazet 		skb_fill_page_desc(to, to_shinfo->nr_frags,
5214bad43ca8SEric Dumazet 				   page, offset, skb_headlen(from));
5215bad43ca8SEric Dumazet 		*fragstolen = true;
5216bad43ca8SEric Dumazet 	} else {
5217c818fa9eSEric Dumazet 		if (to_shinfo->nr_frags +
5218c818fa9eSEric Dumazet 		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
5219bad43ca8SEric Dumazet 			return false;
5220bad43ca8SEric Dumazet 
5221f4b549a5SWeiping Pan 		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5222bad43ca8SEric Dumazet 	}
5223bad43ca8SEric Dumazet 
5224bad43ca8SEric Dumazet 	WARN_ON_ONCE(delta < len);
5225bad43ca8SEric Dumazet 
5226c818fa9eSEric Dumazet 	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5227c818fa9eSEric Dumazet 	       from_shinfo->frags,
5228c818fa9eSEric Dumazet 	       from_shinfo->nr_frags * sizeof(skb_frag_t));
5229c818fa9eSEric Dumazet 	to_shinfo->nr_frags += from_shinfo->nr_frags;
5230bad43ca8SEric Dumazet 
5231bad43ca8SEric Dumazet 	if (!skb_cloned(from))
5232c818fa9eSEric Dumazet 		from_shinfo->nr_frags = 0;
5233bad43ca8SEric Dumazet 
52348ea853fdSLi RongQing 	/* if the skb is not cloned this does nothing
52358ea853fdSLi RongQing 	 * since we set nr_frags to 0.
52368ea853fdSLi RongQing 	 */
5237c818fa9eSEric Dumazet 	for (i = 0; i < from_shinfo->nr_frags; i++)
5238c818fa9eSEric Dumazet 		__skb_frag_ref(&from_shinfo->frags[i]);
5239bad43ca8SEric Dumazet 
5240bad43ca8SEric Dumazet 	to->truesize += delta;
5241bad43ca8SEric Dumazet 	to->len += len;
5242bad43ca8SEric Dumazet 	to->data_len += len;
5243bad43ca8SEric Dumazet 
5244bad43ca8SEric Dumazet 	*delta_truesize = delta;
5245bad43ca8SEric Dumazet 	return true;
5246bad43ca8SEric Dumazet }
5247bad43ca8SEric Dumazet EXPORT_SYMBOL(skb_try_coalesce);
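
/* Example usage (editor's sketch, not part of the original file): the
 * receive-queue coalescing pattern used by protocol code. On success the
 * data now lives in @tail and what is left of @skb is released with
 * kfree_skb_partial() above; truesize accounting is left to the caller.
 */
static bool __maybe_unused skb_try_coalesce_example(struct sk_buff *tail,
						    struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		kfree_skb_partial(skb, fragstolen);
		return true;
	}
	return false;
}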
5248621e84d6SNicolas Dichtel 
5249621e84d6SNicolas Dichtel /**
52508b27f277SNicolas Dichtel  * skb_scrub_packet - scrub an skb
5251621e84d6SNicolas Dichtel  *
5252621e84d6SNicolas Dichtel  * @skb: buffer to clean
52538b27f277SNicolas Dichtel  * @xnet: packet is crossing netns
5254621e84d6SNicolas Dichtel  *
52558b27f277SNicolas Dichtel  * skb_scrub_packet can be used after encapsulating or decapsulating a packet
52568b27f277SNicolas Dichtel  * into/from a tunnel. Some information has to be cleared during these
52578b27f277SNicolas Dichtel  * operations.
52588b27f277SNicolas Dichtel  * skb_scrub_packet can also be used to clean an skb before injecting it into
52598b27f277SNicolas Dichtel  * another namespace (@xnet == true). We have to clear all information in the
52608b27f277SNicolas Dichtel  * skb that could impact namespace isolation.
5261621e84d6SNicolas Dichtel  */
52628b27f277SNicolas Dichtel void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5263621e84d6SNicolas Dichtel {
5264621e84d6SNicolas Dichtel 	skb->pkt_type = PACKET_HOST;
5265621e84d6SNicolas Dichtel 	skb->skb_iif = 0;
526660ff7467SWANG Cong 	skb->ignore_df = 0;
5267621e84d6SNicolas Dichtel 	skb_dst_drop(skb);
5268174e2381SFlorian Westphal 	skb_ext_reset(skb);
5269895b5c9fSFlorian Westphal 	nf_reset_ct(skb);
5270621e84d6SNicolas Dichtel 	nf_reset_trace(skb);
5271213dd74aSHerbert Xu 
52726f9a5069SPetr Machata #ifdef CONFIG_NET_SWITCHDEV
52736f9a5069SPetr Machata 	skb->offload_fwd_mark = 0;
5274875e8939SIdo Schimmel 	skb->offload_l3_fwd_mark = 0;
52756f9a5069SPetr Machata #endif
52766f9a5069SPetr Machata 
5277213dd74aSHerbert Xu 	if (!xnet)
5278213dd74aSHerbert Xu 		return;
5279213dd74aSHerbert Xu 
52802b5ec1a5SYe Yin 	ipvs_reset(skb);
5281213dd74aSHerbert Xu 	skb->mark = 0;
5282c47d8c2fSJesus Sanchez-Palencia 	skb->tstamp = 0;
5283621e84d6SNicolas Dichtel }
5284621e84d6SNicolas Dichtel EXPORT_SYMBOL_GPL(skb_scrub_packet);
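
/* Example usage (editor's sketch, not part of the original file): a tunnel
 * receive path scrubbing a decapsulated packet, deriving @xnet from whether
 * the packet is crossing into another netns, as tunnel drivers do.
 */
static void __maybe_unused skb_scrub_packet_example(struct sk_buff *skb,
						    struct net_device *dev)
{
	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
	skb->dev = dev;
}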
5285de960aa9SFlorian Westphal 
5286de960aa9SFlorian Westphal /**
5287de960aa9SFlorian Westphal  * skb_gso_transport_seglen - Return length of individual segments of a gso packet
5288de960aa9SFlorian Westphal  *
5289de960aa9SFlorian Westphal  * @skb: GSO skb
5290de960aa9SFlorian Westphal  *
5291de960aa9SFlorian Westphal  * skb_gso_transport_seglen is used to determine the real size of the
5292de960aa9SFlorian Westphal  * individual segments, including Layer4 headers (TCP/UDP).
5293de960aa9SFlorian Westphal  *
5294de960aa9SFlorian Westphal  * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
5295de960aa9SFlorian Westphal  */
5296a4a77718SDaniel Axtens static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
5297de960aa9SFlorian Westphal {
5298de960aa9SFlorian Westphal 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
5299f993bc25SFlorian Westphal 	unsigned int thlen = 0;
5300f993bc25SFlorian Westphal 
5301f993bc25SFlorian Westphal 	if (skb->encapsulation) {
5302f993bc25SFlorian Westphal 		thlen = skb_inner_transport_header(skb) -
5303f993bc25SFlorian Westphal 			skb_transport_header(skb);
5304de960aa9SFlorian Westphal 
5305de960aa9SFlorian Westphal 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5306f993bc25SFlorian Westphal 			thlen += inner_tcp_hdrlen(skb);
5307f993bc25SFlorian Westphal 	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
5308f993bc25SFlorian Westphal 		thlen = tcp_hdrlen(skb);
53091dd27cdeSDaniel Axtens 	} else if (unlikely(skb_is_gso_sctp(skb))) {
531090017accSMarcelo Ricardo Leitner 		thlen = sizeof(struct sctphdr);
5311ee80d1ebSWillem de Bruijn 	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
5312ee80d1ebSWillem de Bruijn 		thlen = sizeof(struct udphdr);
5313f993bc25SFlorian Westphal 	}
53146d39d589SFlorian Westphal 	/* UFO sets gso_size to the size of the fragmentation
53156d39d589SFlorian Westphal 	 * payload, i.e. the size of the L4 (UDP) header is already
53166d39d589SFlorian Westphal 	 * accounted for.
53176d39d589SFlorian Westphal 	 */
5318f993bc25SFlorian Westphal 	return thlen + shinfo->gso_size;
5319de960aa9SFlorian Westphal }
5320a4a77718SDaniel Axtens 
5321a4a77718SDaniel Axtens /**
5322a4a77718SDaniel Axtens  * skb_gso_network_seglen - Return length of individual segments of a gso packet
5323a4a77718SDaniel Axtens  *
5324a4a77718SDaniel Axtens  * @skb: GSO skb
5325a4a77718SDaniel Axtens  *
5326a4a77718SDaniel Axtens  * skb_gso_network_seglen is used to determine the real size of the
5327a4a77718SDaniel Axtens  * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
5328a4a77718SDaniel Axtens  *
5329a4a77718SDaniel Axtens  * The MAC/L2 header is not accounted for.
5330a4a77718SDaniel Axtens  */
5331a4a77718SDaniel Axtens static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
5332a4a77718SDaniel Axtens {
5333a4a77718SDaniel Axtens 	unsigned int hdr_len = skb_transport_header(skb) -
5334a4a77718SDaniel Axtens 			       skb_network_header(skb);
5335a4a77718SDaniel Axtens 
5336a4a77718SDaniel Axtens 	return hdr_len + skb_gso_transport_seglen(skb);
5337a4a77718SDaniel Axtens }
5338a4a77718SDaniel Axtens 
5339a4a77718SDaniel Axtens /**
5340a4a77718SDaniel Axtens  * skb_gso_mac_seglen - Return length of individual segments of a gso packet
5341a4a77718SDaniel Axtens  *
5342a4a77718SDaniel Axtens  * @skb: GSO skb
5343a4a77718SDaniel Axtens  *
5344a4a77718SDaniel Axtens  * skb_gso_mac_seglen is used to determine the real size of the
5345a4a77718SDaniel Axtens  * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
5346a4a77718SDaniel Axtens  * headers (TCP/UDP).
5347a4a77718SDaniel Axtens  */
5348a4a77718SDaniel Axtens static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5349a4a77718SDaniel Axtens {
5350a4a77718SDaniel Axtens 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5351a4a77718SDaniel Axtens 
5352a4a77718SDaniel Axtens 	return hdr_len + skb_gso_transport_seglen(skb);
5353a4a77718SDaniel Axtens }
53540d5501c1SVlad Yasevich 
5355ae7ef81eSMarcelo Ricardo Leitner /**
53562b16f048SDaniel Axtens  * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
53572b16f048SDaniel Axtens  *
53582b16f048SDaniel Axtens  * There are a couple of instances where we have a GSO skb, and we
53592b16f048SDaniel Axtens  * want to determine what size it would be after it is segmented.
53602b16f048SDaniel Axtens  *
53612b16f048SDaniel Axtens  * We might want to check:
53622b16f048SDaniel Axtens  * -    L3+L4+payload size (e.g. IP forwarding)
53632b16f048SDaniel Axtens  * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
53642b16f048SDaniel Axtens  *
53652b16f048SDaniel Axtens  * This is a helper to do that correctly considering GSO_BY_FRAGS.
53662b16f048SDaniel Axtens  *
536749682bfaSMathieu Malaterre  * @skb: GSO skb
536849682bfaSMathieu Malaterre  *
53692b16f048SDaniel Axtens  * @seg_len: The segmented length (from skb_gso_*_seglen). In the
53702b16f048SDaniel Axtens  *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
53712b16f048SDaniel Axtens  *
53722b16f048SDaniel Axtens  * @max_len: The maximum permissible length.
53732b16f048SDaniel Axtens  *
53742b16f048SDaniel Axtens  * Returns true if the segmented length <= max length.
53752b16f048SDaniel Axtens  */
53762b16f048SDaniel Axtens static inline bool skb_gso_size_check(const struct sk_buff *skb,
53772b16f048SDaniel Axtens 				      unsigned int seg_len,
53782b16f048SDaniel Axtens 				      unsigned int max_len)
53782b16f048SDaniel Axtens {
53792b16f048SDaniel Axtens 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
53802b16f048SDaniel Axtens 	const struct sk_buff *iter;
53812b16f048SDaniel Axtens 
53822b16f048SDaniel Axtens 	if (shinfo->gso_size != GSO_BY_FRAGS)
53832b16f048SDaniel Axtens 		return seg_len <= max_len;
53842b16f048SDaniel Axtens 
53852b16f048SDaniel Axtens 	/* Undo this so we can re-use header sizes */
53862b16f048SDaniel Axtens 	seg_len -= GSO_BY_FRAGS;
53872b16f048SDaniel Axtens 
53882b16f048SDaniel Axtens 	skb_walk_frags(skb, iter) {
53892b16f048SDaniel Axtens 		if (seg_len + skb_headlen(iter) > max_len)
53902b16f048SDaniel Axtens 			return false;
53912b16f048SDaniel Axtens 	}
53922b16f048SDaniel Axtens 
53932b16f048SDaniel Axtens 	return true;
53942b16f048SDaniel Axtens }
53952b16f048SDaniel Axtens 
53962b16f048SDaniel Axtens /**
5397779b7931SDaniel Axtens  * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5398ae7ef81eSMarcelo Ricardo Leitner  *
5399ae7ef81eSMarcelo Ricardo Leitner  * @skb: GSO skb
540076f21b99SDavid S. Miller  * @mtu: MTU to validate against
5401ae7ef81eSMarcelo Ricardo Leitner  *
5402779b7931SDaniel Axtens  * skb_gso_validate_network_len validates if a given skb will fit a
5403779b7931SDaniel Axtens  * wanted MTU once split. It considers L3 headers, L4 headers, and the
5404779b7931SDaniel Axtens  * payload.
5405ae7ef81eSMarcelo Ricardo Leitner  */
5406779b7931SDaniel Axtens bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5407ae7ef81eSMarcelo Ricardo Leitner {
54082b16f048SDaniel Axtens 	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5409ae7ef81eSMarcelo Ricardo Leitner }
5410779b7931SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
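
/* A minimal usage sketch, modelled on the IP forwarding path (the helper
 * name example_exceeds_mtu is hypothetical): a GSO skb may exceed the MTU
 * as a whole and yet fit once segmented, so check the post-segmentation
 * length before deciding to fragment or reply with an ICMP error.
 *
 *	static bool example_exceeds_mtu(const struct sk_buff *skb,
 *					unsigned int mtu)
 *	{
 *		if (skb->len <= mtu)
 *			return false;
 *		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 *			return false;
 *		return true;
 *	}
 */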
5411ae7ef81eSMarcelo Ricardo Leitner 
54122b16f048SDaniel Axtens /**
54132b16f048SDaniel Axtens  * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
54142b16f048SDaniel Axtens  *
54152b16f048SDaniel Axtens  * @skb: GSO skb
54162b16f048SDaniel Axtens  * @len: length to validate against
54172b16f048SDaniel Axtens  *
54182b16f048SDaniel Axtens  * skb_gso_validate_mac_len validates if a given skb will fit a wanted
54192b16f048SDaniel Axtens  * length once split, including L2, L3 and L4 headers and the payload.
54202b16f048SDaniel Axtens  */
54212b16f048SDaniel Axtens bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
54222b16f048SDaniel Axtens {
54232b16f048SDaniel Axtens 	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
54242b16f048SDaniel Axtens }
54252b16f048SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
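
/* Sketch of the L2 variant, e.g. a qdisc or driver sanity check before
 * handing a GSO skb to hardware. max_frame_len is an assumed, device
 * specific limit that includes the Ethernet header:
 *
 *	if (skb_is_gso(skb) && !skb_gso_validate_mac_len(skb, max_frame_len))
 *		return -EINVAL;
 */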
54262b16f048SDaniel Axtens 
54270d5501c1SVlad Yasevich static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
54280d5501c1SVlad Yasevich {
5429d85e8be2SYuya Kusakabe 	int mac_len, meta_len;
5430d85e8be2SYuya Kusakabe 	void *meta;
54314bbb3e0eSToshiaki Makita 
54320d5501c1SVlad Yasevich 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
54330d5501c1SVlad Yasevich 		kfree_skb(skb);
54340d5501c1SVlad Yasevich 		return NULL;
54350d5501c1SVlad Yasevich 	}
54360d5501c1SVlad Yasevich 
54374bbb3e0eSToshiaki Makita 	mac_len = skb->data - skb_mac_header(skb);
5438ae474573SToshiaki Makita 	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
54394bbb3e0eSToshiaki Makita 		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
54404bbb3e0eSToshiaki Makita 			mac_len - VLAN_HLEN - ETH_TLEN);
5441ae474573SToshiaki Makita 	}
5442d85e8be2SYuya Kusakabe 
5443d85e8be2SYuya Kusakabe 	meta_len = skb_metadata_len(skb);
5444d85e8be2SYuya Kusakabe 	if (meta_len) {
5445d85e8be2SYuya Kusakabe 		meta = skb_metadata_end(skb) - meta_len;
5446d85e8be2SYuya Kusakabe 		memmove(meta + VLAN_HLEN, meta, meta_len);
5447d85e8be2SYuya Kusakabe 	}
5448d85e8be2SYuya Kusakabe 
54490d5501c1SVlad Yasevich 	skb->mac_header += VLAN_HLEN;
54500d5501c1SVlad Yasevich 	return skb;
54510d5501c1SVlad Yasevich }
54520d5501c1SVlad Yasevich 
54530d5501c1SVlad Yasevich struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
54540d5501c1SVlad Yasevich {
54550d5501c1SVlad Yasevich 	struct vlan_hdr *vhdr;
54560d5501c1SVlad Yasevich 	u16 vlan_tci;
54570d5501c1SVlad Yasevich 
5458df8a39deSJiri Pirko 	if (unlikely(skb_vlan_tag_present(skb))) {
54590d5501c1SVlad Yasevich 	/* vlan_tci is already set up, so leave this for another time */
54600d5501c1SVlad Yasevich 		return skb;
54610d5501c1SVlad Yasevich 	}
54620d5501c1SVlad Yasevich 
54630d5501c1SVlad Yasevich 	skb = skb_share_check(skb, GFP_ATOMIC);
54640d5501c1SVlad Yasevich 	if (unlikely(!skb))
54650d5501c1SVlad Yasevich 		goto err_free;
546655eff0ebSMiaohe Lin 	/* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
546755eff0ebSMiaohe Lin 	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
54680d5501c1SVlad Yasevich 		goto err_free;
54690d5501c1SVlad Yasevich 
54700d5501c1SVlad Yasevich 	vhdr = (struct vlan_hdr *)skb->data;
54710d5501c1SVlad Yasevich 	vlan_tci = ntohs(vhdr->h_vlan_TCI);
54720d5501c1SVlad Yasevich 	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
54730d5501c1SVlad Yasevich 
54740d5501c1SVlad Yasevich 	skb_pull_rcsum(skb, VLAN_HLEN);
54750d5501c1SVlad Yasevich 	vlan_set_encap_proto(skb, vhdr);
54760d5501c1SVlad Yasevich 
54770d5501c1SVlad Yasevich 	skb = skb_reorder_vlan_header(skb);
54780d5501c1SVlad Yasevich 	if (unlikely(!skb))
54790d5501c1SVlad Yasevich 		goto err_free;
54800d5501c1SVlad Yasevich 
54810d5501c1SVlad Yasevich 	skb_reset_network_header(skb);
54828be33ecfSAlexander Lobakin 	if (!skb_transport_header_was_set(skb))
54830d5501c1SVlad Yasevich 		skb_reset_transport_header(skb);
54840d5501c1SVlad Yasevich 	skb_reset_mac_len(skb);
54850d5501c1SVlad Yasevich 
54860d5501c1SVlad Yasevich 	return skb;
54870d5501c1SVlad Yasevich 
54880d5501c1SVlad Yasevich err_free:
54890d5501c1SVlad Yasevich 	kfree_skb(skb);
54900d5501c1SVlad Yasevich 	return NULL;
54910d5501c1SVlad Yasevich }
54920d5501c1SVlad Yasevich EXPORT_SYMBOL(skb_vlan_untag);
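
/* Hypothetical receive-path sketch: normalise an incoming frame so that
 * later code can rely on skb_vlan_tag_present() instead of parsing the
 * VLAN header out of the payload. On failure the skb has been freed.
 *
 *	skb = skb_vlan_untag(skb);
 *	if (unlikely(!skb))
 *		return;
 */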
54932e4e4410SEric Dumazet 
5494e2195121SJiri Pirko int skb_ensure_writable(struct sk_buff *skb, int write_len)
5495e2195121SJiri Pirko {
5496e2195121SJiri Pirko 	if (!pskb_may_pull(skb, write_len))
5497e2195121SJiri Pirko 		return -ENOMEM;
5498e2195121SJiri Pirko 
5499e2195121SJiri Pirko 	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5500e2195121SJiri Pirko 		return 0;
5501e2195121SJiri Pirko 
5502e2195121SJiri Pirko 	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5503e2195121SJiri Pirko }
5504e2195121SJiri Pirko EXPORT_SYMBOL(skb_ensure_writable);
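
/* A minimal sketch, assuming a caller that rewrites the Ethernet
 * destination address in place (new_dst is an assumed parameter).
 * skb_ensure_writable() guarantees that the first write_len bytes are
 * linear and not shared with any clone.
 *
 *	int err = skb_ensure_writable(skb, ETH_HLEN);
 *
 *	if (err)
 *		return err;
 *	ether_addr_copy(eth_hdr(skb)->h_dest, new_dst);
 */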
5505e2195121SJiri Pirko 
5506bfca4c52SShmulik Ladkani /* Remove the VLAN header from the packet and update the csum accordingly.
5507bfca4c52SShmulik Ladkani  * Expects a !skb_vlan_tag_present() skb with a VLAN tag in the payload.
5508bfca4c52SShmulik Ladkani  */
5509bfca4c52SShmulik Ladkani int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
551093515d53SJiri Pirko {
551193515d53SJiri Pirko 	struct vlan_hdr *vhdr;
5512b6a79208SShmulik Ladkani 	int offset = skb->data - skb_mac_header(skb);
551393515d53SJiri Pirko 	int err;
551493515d53SJiri Pirko 
5515b6a79208SShmulik Ladkani 	if (WARN_ONCE(offset,
5516b6a79208SShmulik Ladkani 		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5517b6a79208SShmulik Ladkani 		      offset)) {
5518b6a79208SShmulik Ladkani 		return -EINVAL;
5519b6a79208SShmulik Ladkani 	}
5520b6a79208SShmulik Ladkani 
552193515d53SJiri Pirko 	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
552293515d53SJiri Pirko 	if (unlikely(err))
5523b6a79208SShmulik Ladkani 		return err;
552493515d53SJiri Pirko 
552593515d53SJiri Pirko 	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
552693515d53SJiri Pirko 
552793515d53SJiri Pirko 	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
552893515d53SJiri Pirko 	*vlan_tci = ntohs(vhdr->h_vlan_TCI);
552993515d53SJiri Pirko 
553093515d53SJiri Pirko 	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
553193515d53SJiri Pirko 	__skb_pull(skb, VLAN_HLEN);
553293515d53SJiri Pirko 
553393515d53SJiri Pirko 	vlan_set_encap_proto(skb, vhdr);
553493515d53SJiri Pirko 	skb->mac_header += VLAN_HLEN;
553593515d53SJiri Pirko 
553693515d53SJiri Pirko 	if (skb_network_offset(skb) < ETH_HLEN)
553793515d53SJiri Pirko 		skb_set_network_header(skb, ETH_HLEN);
553893515d53SJiri Pirko 
553993515d53SJiri Pirko 	skb_reset_mac_len(skb);
554093515d53SJiri Pirko 
554193515d53SJiri Pirko 	return err;
554293515d53SJiri Pirko }
5543bfca4c52SShmulik Ladkani EXPORT_SYMBOL(__skb_vlan_pop);
554493515d53SJiri Pirko 
5545b6a79208SShmulik Ladkani /* Pop a vlan tag either from hwaccel or from payload.
5546b6a79208SShmulik Ladkani  * Expects skb->data at mac header.
5547b6a79208SShmulik Ladkani  */
554893515d53SJiri Pirko int skb_vlan_pop(struct sk_buff *skb)
554993515d53SJiri Pirko {
555093515d53SJiri Pirko 	u16 vlan_tci;
555193515d53SJiri Pirko 	__be16 vlan_proto;
555293515d53SJiri Pirko 	int err;
555393515d53SJiri Pirko 
5554df8a39deSJiri Pirko 	if (likely(skb_vlan_tag_present(skb))) {
5555b1817524SMichał Mirosław 		__vlan_hwaccel_clear_tag(skb);
555693515d53SJiri Pirko 	} else {
5557ecf4ee41SShmulik Ladkani 		if (unlikely(!eth_type_vlan(skb->protocol)))
555893515d53SJiri Pirko 			return 0;
555993515d53SJiri Pirko 
556093515d53SJiri Pirko 		err = __skb_vlan_pop(skb, &vlan_tci);
556193515d53SJiri Pirko 		if (err)
556293515d53SJiri Pirko 			return err;
556393515d53SJiri Pirko 	}
556493515d53SJiri Pirko 	/* move next vlan tag to hw accel tag */
5565ecf4ee41SShmulik Ladkani 	if (likely(!eth_type_vlan(skb->protocol)))
556693515d53SJiri Pirko 		return 0;
556793515d53SJiri Pirko 
556893515d53SJiri Pirko 	vlan_proto = skb->protocol;
556993515d53SJiri Pirko 	err = __skb_vlan_pop(skb, &vlan_tci);
557093515d53SJiri Pirko 	if (unlikely(err))
557193515d53SJiri Pirko 		return err;
557293515d53SJiri Pirko 
557393515d53SJiri Pirko 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
557493515d53SJiri Pirko 	return 0;
557593515d53SJiri Pirko }
557693515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_pop);
557793515d53SJiri Pirko 
5578b6a79208SShmulik Ladkani /* Push a vlan tag into hwaccel. If a hwaccel tag is already present,
5579b6a79208SShmulik Ladkani  * it is first moved into the payload. Expects skb->data at mac header.
5580b6a79208SShmulik Ladkani  */
558193515d53SJiri Pirko int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
558293515d53SJiri Pirko {
5583df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
5584b6a79208SShmulik Ladkani 		int offset = skb->data - skb_mac_header(skb);
558593515d53SJiri Pirko 		int err;
558693515d53SJiri Pirko 
5587b6a79208SShmulik Ladkani 		if (WARN_ONCE(offset,
5588b6a79208SShmulik Ladkani 			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5589b6a79208SShmulik Ladkani 			      offset)) {
5590b6a79208SShmulik Ladkani 			return -EINVAL;
5591b6a79208SShmulik Ladkani 		}
5592b6a79208SShmulik Ladkani 
559393515d53SJiri Pirko 		err = __vlan_insert_tag(skb, skb->vlan_proto,
5594df8a39deSJiri Pirko 					skb_vlan_tag_get(skb));
5595b6a79208SShmulik Ladkani 		if (err)
559693515d53SJiri Pirko 			return err;
55979241e2dfSDaniel Borkmann 
559893515d53SJiri Pirko 		skb->protocol = skb->vlan_proto;
559993515d53SJiri Pirko 		skb->mac_len += VLAN_HLEN;
560093515d53SJiri Pirko 
56016b83d28aSDaniel Borkmann 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
560293515d53SJiri Pirko 	}
560393515d53SJiri Pirko 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
560493515d53SJiri Pirko 	return 0;
560593515d53SJiri Pirko }
560693515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_push);
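
/* The pop/push helpers above compose into a tag rewrite (sketch; new_tci
 * is an assumed value, and skb->data must sit at the mac header as
 * documented):
 *
 *	err = skb_vlan_pop(skb);
 *	if (err)
 *		return err;
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
 */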
560793515d53SJiri Pirko 
560819fbcb36SGuillaume Nault /**
560919fbcb36SGuillaume Nault  * skb_eth_pop() - Drop the Ethernet header at the head of a packet
561019fbcb36SGuillaume Nault  *
561119fbcb36SGuillaume Nault  * @skb: Socket buffer to modify
561219fbcb36SGuillaume Nault  *
561319fbcb36SGuillaume Nault  * Drop the Ethernet header of @skb.
561419fbcb36SGuillaume Nault  *
561519fbcb36SGuillaume Nault  * Expects that skb->data points to the mac header and that no VLAN tags are
561619fbcb36SGuillaume Nault  * present.
561719fbcb36SGuillaume Nault  *
561819fbcb36SGuillaume Nault  * Returns 0 on success, -errno otherwise.
561919fbcb36SGuillaume Nault  */
562019fbcb36SGuillaume Nault int skb_eth_pop(struct sk_buff *skb)
562119fbcb36SGuillaume Nault {
562219fbcb36SGuillaume Nault 	if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
562319fbcb36SGuillaume Nault 	    skb_network_offset(skb) < ETH_HLEN)
562419fbcb36SGuillaume Nault 		return -EPROTO;
562519fbcb36SGuillaume Nault 
562619fbcb36SGuillaume Nault 	skb_pull_rcsum(skb, ETH_HLEN);
562719fbcb36SGuillaume Nault 	skb_reset_mac_header(skb);
562819fbcb36SGuillaume Nault 	skb_reset_mac_len(skb);
562919fbcb36SGuillaume Nault 
563019fbcb36SGuillaume Nault 	return 0;
563119fbcb36SGuillaume Nault }
563219fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_pop);
563319fbcb36SGuillaume Nault 
563419fbcb36SGuillaume Nault /**
563519fbcb36SGuillaume Nault  * skb_eth_push() - Add a new Ethernet header at the head of a packet
563619fbcb36SGuillaume Nault  *
563719fbcb36SGuillaume Nault  * @skb: Socket buffer to modify
563819fbcb36SGuillaume Nault  * @dst: Destination MAC address of the new header
563919fbcb36SGuillaume Nault  * @src: Source MAC address of the new header
564019fbcb36SGuillaume Nault  *
564119fbcb36SGuillaume Nault  * Prepend @skb with a new Ethernet header.
564219fbcb36SGuillaume Nault  *
564319fbcb36SGuillaume Nault  * Expects that skb->data points to the mac header, which must be empty.
564419fbcb36SGuillaume Nault  *
564519fbcb36SGuillaume Nault  * Returns 0 on success, -errno otherwise.
564619fbcb36SGuillaume Nault  */
564719fbcb36SGuillaume Nault int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
564819fbcb36SGuillaume Nault 		 const unsigned char *src)
564919fbcb36SGuillaume Nault {
565019fbcb36SGuillaume Nault 	struct ethhdr *eth;
565119fbcb36SGuillaume Nault 	int err;
565219fbcb36SGuillaume Nault 
565319fbcb36SGuillaume Nault 	if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
565419fbcb36SGuillaume Nault 		return -EPROTO;
565519fbcb36SGuillaume Nault 
565619fbcb36SGuillaume Nault 	err = skb_cow_head(skb, sizeof(*eth));
565719fbcb36SGuillaume Nault 	if (err < 0)
565819fbcb36SGuillaume Nault 		return err;
565919fbcb36SGuillaume Nault 
566019fbcb36SGuillaume Nault 	skb_push(skb, sizeof(*eth));
566119fbcb36SGuillaume Nault 	skb_reset_mac_header(skb);
566219fbcb36SGuillaume Nault 	skb_reset_mac_len(skb);
566319fbcb36SGuillaume Nault 
566419fbcb36SGuillaume Nault 	eth = eth_hdr(skb);
566519fbcb36SGuillaume Nault 	ether_addr_copy(eth->h_dest, dst);
566619fbcb36SGuillaume Nault 	ether_addr_copy(eth->h_source, src);
566719fbcb36SGuillaume Nault 	eth->h_proto = skb->protocol;
566819fbcb36SGuillaume Nault 
566919fbcb36SGuillaume Nault 	skb_postpush_rcsum(skb, eth, sizeof(*eth));
567019fbcb36SGuillaume Nault 
567119fbcb36SGuillaume Nault 	return 0;
567219fbcb36SGuillaume Nault }
567319fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_push);
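
/* Sketch: strip and later restore the Ethernet header around an L3-only
 * processing stage. dst_mac and src_mac are assumed values:
 *
 *	err = skb_eth_pop(skb);
 *	if (err)
 *		return err;
 *	// ... process the bare L3 packet ...
 *	err = skb_eth_push(skb, dst_mac, src_mac);
 */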
567419fbcb36SGuillaume Nault 
56758822e270SJohn Hurley /* Update the ethertype of hdr and the skb csum value if required. */
56768822e270SJohn Hurley static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
56778822e270SJohn Hurley 			     __be16 ethertype)
56788822e270SJohn Hurley {
56798822e270SJohn Hurley 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
56808822e270SJohn Hurley 		__be16 diff[] = { ~hdr->h_proto, ethertype };
56818822e270SJohn Hurley 
56828822e270SJohn Hurley 		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
56838822e270SJohn Hurley 	}
56848822e270SJohn Hurley 
56858822e270SJohn Hurley 	hdr->h_proto = ethertype;
56868822e270SJohn Hurley }
56878822e270SJohn Hurley 
56888822e270SJohn Hurley /**
5689e7dbfed1SMartin Varghese  * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
5690e7dbfed1SMartin Varghese  *                   the packet
56918822e270SJohn Hurley  *
56928822e270SJohn Hurley  * @skb: buffer
56938822e270SJohn Hurley  * @mpls_lse: MPLS label stack entry to push
56948822e270SJohn Hurley  * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
5695fa4e0f88SDavide Caratti  * @mac_len: length of the MAC header
5696e7dbfed1SMartin Varghese  * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
5697e7dbfed1SMartin Varghese  *            ethernet
56988822e270SJohn Hurley  *
56998822e270SJohn Hurley  * Expects skb->data at mac header.
57008822e270SJohn Hurley  *
57018822e270SJohn Hurley  * Returns 0 on success, -errno otherwise.
57028822e270SJohn Hurley  */
5703fa4e0f88SDavide Caratti int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
5704d04ac224SMartin Varghese 		  int mac_len, bool ethernet)
57058822e270SJohn Hurley {
57068822e270SJohn Hurley 	struct mpls_shim_hdr *lse;
57078822e270SJohn Hurley 	int err;
57088822e270SJohn Hurley 
57098822e270SJohn Hurley 	if (unlikely(!eth_p_mpls(mpls_proto)))
57108822e270SJohn Hurley 		return -EINVAL;
57118822e270SJohn Hurley 
57128822e270SJohn Hurley 	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
57138822e270SJohn Hurley 	if (skb->encapsulation)
57148822e270SJohn Hurley 		return -EINVAL;
57158822e270SJohn Hurley 
57168822e270SJohn Hurley 	err = skb_cow_head(skb, MPLS_HLEN);
57178822e270SJohn Hurley 	if (unlikely(err))
57188822e270SJohn Hurley 		return err;
57198822e270SJohn Hurley 
57208822e270SJohn Hurley 	if (!skb->inner_protocol) {
5721e7dbfed1SMartin Varghese 		skb_set_inner_network_header(skb, skb_network_offset(skb));
57228822e270SJohn Hurley 		skb_set_inner_protocol(skb, skb->protocol);
57238822e270SJohn Hurley 	}
57248822e270SJohn Hurley 
57258822e270SJohn Hurley 	skb_push(skb, MPLS_HLEN);
57268822e270SJohn Hurley 	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
5727fa4e0f88SDavide Caratti 		mac_len);
57288822e270SJohn Hurley 	skb_reset_mac_header(skb);
5729fa4e0f88SDavide Caratti 	skb_set_network_header(skb, mac_len);
5730e7dbfed1SMartin Varghese 	skb_reset_mac_len(skb);
57318822e270SJohn Hurley 
57328822e270SJohn Hurley 	lse = mpls_hdr(skb);
57338822e270SJohn Hurley 	lse->label_stack_entry = mpls_lse;
57348822e270SJohn Hurley 	skb_postpush_rcsum(skb, lse, MPLS_HLEN);
57358822e270SJohn Hurley 
57364296adc3SGuillaume Nault 	if (ethernet && mac_len >= ETH_HLEN)
57378822e270SJohn Hurley 		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
57388822e270SJohn Hurley 	skb->protocol = mpls_proto;
57398822e270SJohn Hurley 
57408822e270SJohn Hurley 	return 0;
57418822e270SJohn Hurley }
57428822e270SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_push);
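
/* Sketch of pushing one label, roughly what an MPLS push action does.
 * The label/TTL values are illustrative and the ARPHRD_ETHER test is an
 * assumed way to derive the @ethernet flag:
 *
 *	__be32 lse = cpu_to_be32(100 << MPLS_LS_LABEL_SHIFT |
 *				 1 << MPLS_LS_S_SHIFT |
 *				 64 << MPLS_LS_TTL_SHIFT);
 *	int err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
 *				skb->mac_len,
 *				skb->dev->type == ARPHRD_ETHER);
 */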
57438822e270SJohn Hurley 
57442e4e4410SEric Dumazet /**
5745ed246ceeSJohn Hurley  * skb_mpls_pop() - pop the outermost MPLS header
5746ed246ceeSJohn Hurley  *
5747ed246ceeSJohn Hurley  * @skb: buffer
5748ed246ceeSJohn Hurley  * @next_proto: ethertype of header after popped MPLS header
5749fa4e0f88SDavide Caratti  * @mac_len: length of the MAC header
575076f99f98SMartin Varghese  * @ethernet: flag to indicate if the packet is ethernet
5751ed246ceeSJohn Hurley  *
5752ed246ceeSJohn Hurley  * Expects skb->data at mac header.
5753ed246ceeSJohn Hurley  *
5754ed246ceeSJohn Hurley  * Returns 0 on success, -errno otherwise.
5755ed246ceeSJohn Hurley  */
5756040b5cfbSMartin Varghese int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
5757040b5cfbSMartin Varghese 		 bool ethernet)
5758ed246ceeSJohn Hurley {
5759ed246ceeSJohn Hurley 	int err;
5760ed246ceeSJohn Hurley 
5761ed246ceeSJohn Hurley 	if (unlikely(!eth_p_mpls(skb->protocol)))
5762dedc5a08SDavide Caratti 		return 0;
5763ed246ceeSJohn Hurley 
5764fa4e0f88SDavide Caratti 	err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
5765ed246ceeSJohn Hurley 	if (unlikely(err))
5766ed246ceeSJohn Hurley 		return err;
5767ed246ceeSJohn Hurley 
5768ed246ceeSJohn Hurley 	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
5769ed246ceeSJohn Hurley 	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
5770fa4e0f88SDavide Caratti 		mac_len);
5771ed246ceeSJohn Hurley 
5772ed246ceeSJohn Hurley 	__skb_pull(skb, MPLS_HLEN);
5773ed246ceeSJohn Hurley 	skb_reset_mac_header(skb);
5774fa4e0f88SDavide Caratti 	skb_set_network_header(skb, mac_len);
5775ed246ceeSJohn Hurley 
57764296adc3SGuillaume Nault 	if (ethernet && mac_len >= ETH_HLEN) {
5777ed246ceeSJohn Hurley 		struct ethhdr *hdr;
5778ed246ceeSJohn Hurley 
5779ed246ceeSJohn Hurley 		/* use mpls_hdr() to get ethertype to account for VLANs. */
5780ed246ceeSJohn Hurley 		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
5781ed246ceeSJohn Hurley 		skb_mod_eth_type(skb, hdr, next_proto);
5782ed246ceeSJohn Hurley 	}
5783ed246ceeSJohn Hurley 	skb->protocol = next_proto;
5784ed246ceeSJohn Hurley 
5785ed246ceeSJohn Hurley 	return 0;
5786ed246ceeSJohn Hurley }
5787ed246ceeSJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_pop);
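
/* The reverse operation (sketch): pop the outermost label and restore
 * the ethertype of the inner packet, here assumed to be IPv4:
 *
 *	int err = skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len,
 *			       skb->dev->type == ARPHRD_ETHER);
 */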
5788ed246ceeSJohn Hurley 
5789ed246ceeSJohn Hurley /**
5790d27cf5c5SJohn Hurley  * skb_mpls_update_lse() - modify outermost MPLS header and update csum
5791d27cf5c5SJohn Hurley  *
5792d27cf5c5SJohn Hurley  * @skb: buffer
5793d27cf5c5SJohn Hurley  * @mpls_lse: new MPLS label stack entry to update to
5794d27cf5c5SJohn Hurley  *
5795d27cf5c5SJohn Hurley  * Expects skb->data at mac header.
5796d27cf5c5SJohn Hurley  *
5797d27cf5c5SJohn Hurley  * Returns 0 on success, -errno otherwise.
5798d27cf5c5SJohn Hurley  */
5799d27cf5c5SJohn Hurley int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
5800d27cf5c5SJohn Hurley {
5801d27cf5c5SJohn Hurley 	int err;
5802d27cf5c5SJohn Hurley 
5803d27cf5c5SJohn Hurley 	if (unlikely(!eth_p_mpls(skb->protocol)))
5804d27cf5c5SJohn Hurley 		return -EINVAL;
5805d27cf5c5SJohn Hurley 
5806d27cf5c5SJohn Hurley 	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
5807d27cf5c5SJohn Hurley 	if (unlikely(err))
5808d27cf5c5SJohn Hurley 		return err;
5809d27cf5c5SJohn Hurley 
5810d27cf5c5SJohn Hurley 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
5811d27cf5c5SJohn Hurley 		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
5812d27cf5c5SJohn Hurley 
5813d27cf5c5SJohn Hurley 		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5814d27cf5c5SJohn Hurley 	}
5815d27cf5c5SJohn Hurley 
5816d27cf5c5SJohn Hurley 	mpls_hdr(skb)->label_stack_entry = mpls_lse;
5817d27cf5c5SJohn Hurley 
5818d27cf5c5SJohn Hurley 	return 0;
5819d27cf5c5SJohn Hurley }
5820d27cf5c5SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
5821d27cf5c5SJohn Hurley 
5822d27cf5c5SJohn Hurley /**
58232a2ea508SJohn Hurley  * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
58242a2ea508SJohn Hurley  *
58252a2ea508SJohn Hurley  * @skb: buffer
58262a2ea508SJohn Hurley  *
58272a2ea508SJohn Hurley  * Expects skb->data at mac header.
58282a2ea508SJohn Hurley  *
58292a2ea508SJohn Hurley  * Returns 0 on success, -errno otherwise.
58302a2ea508SJohn Hurley  */
58312a2ea508SJohn Hurley int skb_mpls_dec_ttl(struct sk_buff *skb)
58322a2ea508SJohn Hurley {
58332a2ea508SJohn Hurley 	u32 lse;
58342a2ea508SJohn Hurley 	u8 ttl;
58352a2ea508SJohn Hurley 
58362a2ea508SJohn Hurley 	if (unlikely(!eth_p_mpls(skb->protocol)))
58372a2ea508SJohn Hurley 		return -EINVAL;
58382a2ea508SJohn Hurley 
583913de4ed9SDavide Caratti 	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
584013de4ed9SDavide Caratti 		return -ENOMEM;
584113de4ed9SDavide Caratti 
58422a2ea508SJohn Hurley 	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
58432a2ea508SJohn Hurley 	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
58442a2ea508SJohn Hurley 	if (!--ttl)
58452a2ea508SJohn Hurley 		return -EINVAL;
58462a2ea508SJohn Hurley 
58472a2ea508SJohn Hurley 	lse &= ~MPLS_LS_TTL_MASK;
58482a2ea508SJohn Hurley 	lse |= ttl << MPLS_LS_TTL_SHIFT;
58492a2ea508SJohn Hurley 
58502a2ea508SJohn Hurley 	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
58512a2ea508SJohn Hurley }
58522a2ea508SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
58532a2ea508SJohn Hurley 
58542a2ea508SJohn Hurley /**
58552e4e4410SEric Dumazet  * alloc_skb_with_frags - allocate skb with page frags
58562e4e4410SEric Dumazet  *
5857de3f0d0eSMasanari Iida  * @header_len: size of linear part
5858de3f0d0eSMasanari Iida  * @data_len: needed length in frags
5859de3f0d0eSMasanari Iida  * @max_page_order: max page order desired.
5860de3f0d0eSMasanari Iida  * @errcode: pointer to error code if any
5861de3f0d0eSMasanari Iida  * @gfp_mask: allocation mask
58622e4e4410SEric Dumazet  *
58632e4e4410SEric Dumazet  * This can be used to allocate a paged skb, given a maximal order for frags.
58642e4e4410SEric Dumazet  */
58652e4e4410SEric Dumazet struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
58662e4e4410SEric Dumazet 				     unsigned long data_len,
58672e4e4410SEric Dumazet 				     int max_page_order,
58682e4e4410SEric Dumazet 				     int *errcode,
58692e4e4410SEric Dumazet 				     gfp_t gfp_mask)
58702e4e4410SEric Dumazet {
58712e4e4410SEric Dumazet 	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
58722e4e4410SEric Dumazet 	unsigned long chunk;
58732e4e4410SEric Dumazet 	struct sk_buff *skb;
58742e4e4410SEric Dumazet 	struct page *page;
58752e4e4410SEric Dumazet 	int i;
58762e4e4410SEric Dumazet 
58772e4e4410SEric Dumazet 	*errcode = -EMSGSIZE;
58782e4e4410SEric Dumazet 	/* Note: this test could be relaxed if we managed to allocate
58792e4e4410SEric Dumazet 	 * high-order pages...
58802e4e4410SEric Dumazet 	 */
58812e4e4410SEric Dumazet 	if (npages > MAX_SKB_FRAGS)
58822e4e4410SEric Dumazet 		return NULL;
58832e4e4410SEric Dumazet 
58842e4e4410SEric Dumazet 	*errcode = -ENOBUFS;
5885f8c468e8SDavid Rientjes 	skb = alloc_skb(header_len, gfp_mask);
58862e4e4410SEric Dumazet 	if (!skb)
58872e4e4410SEric Dumazet 		return NULL;
58882e4e4410SEric Dumazet 
58892e4e4410SEric Dumazet 	skb->truesize += npages << PAGE_SHIFT;
58902e4e4410SEric Dumazet 
58912e4e4410SEric Dumazet 	for (i = 0; npages > 0; i++) {
58922e4e4410SEric Dumazet 		int order = max_page_order;
58932e4e4410SEric Dumazet 
58942e4e4410SEric Dumazet 		while (order) {
58952e4e4410SEric Dumazet 			if (npages >= 1 << order) {
5896d0164adcSMel Gorman 				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
58972e4e4410SEric Dumazet 						   __GFP_COMP |
5898d14b56f5SMichal Hocko 						   __GFP_NOWARN,
58992e4e4410SEric Dumazet 						   order);
59002e4e4410SEric Dumazet 				if (page)
59012e4e4410SEric Dumazet 					goto fill_page;
59022e4e4410SEric Dumazet 				/* Do not retry other high order allocations */
59032e4e4410SEric Dumazet 				order = 1;
59042e4e4410SEric Dumazet 				max_page_order = 0;
59052e4e4410SEric Dumazet 			}
59062e4e4410SEric Dumazet 			order--;
59072e4e4410SEric Dumazet 		}
59082e4e4410SEric Dumazet 		page = alloc_page(gfp_mask);
59092e4e4410SEric Dumazet 		if (!page)
59102e4e4410SEric Dumazet 			goto failure;
59112e4e4410SEric Dumazet fill_page:
59122e4e4410SEric Dumazet 		chunk = min_t(unsigned long, data_len,
59132e4e4410SEric Dumazet 			      PAGE_SIZE << order);
59142e4e4410SEric Dumazet 		skb_fill_page_desc(skb, i, page, 0, chunk);
59152e4e4410SEric Dumazet 		data_len -= chunk;
59162e4e4410SEric Dumazet 		npages -= 1 << order;
59172e4e4410SEric Dumazet 	}
59182e4e4410SEric Dumazet 	return skb;
59192e4e4410SEric Dumazet 
59202e4e4410SEric Dumazet failure:
59212e4e4410SEric Dumazet 	kfree_skb(skb);
59222e4e4410SEric Dumazet 	return NULL;
59232e4e4410SEric Dumazet }
59242e4e4410SEric Dumazet EXPORT_SYMBOL(alloc_skb_with_frags);
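
/* A minimal sketch, close to what sock_alloc_send_pskb() does: place
 * header_len bytes in the linear area and data_len bytes in page frags,
 * trying order-3 pages first (both lengths are assumed values):
 *
 *	int errcode;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(header_len, data_len,
 *				   PAGE_ALLOC_COSTLY_ORDER, &errcode,
 *				   GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(errcode);
 */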
59256fa01ccdSSowmini Varadhan 
59266fa01ccdSSowmini Varadhan /* carve out the first off bytes from skb when off < headlen */
59276fa01ccdSSowmini Varadhan static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
59286fa01ccdSSowmini Varadhan 				    const int headlen, gfp_t gfp_mask)
59296fa01ccdSSowmini Varadhan {
59306fa01ccdSSowmini Varadhan 	int i;
59316fa01ccdSSowmini Varadhan 	int size = skb_end_offset(skb);
59326fa01ccdSSowmini Varadhan 	int new_hlen = headlen - off;
59336fa01ccdSSowmini Varadhan 	u8 *data;
59346fa01ccdSSowmini Varadhan 
59356fa01ccdSSowmini Varadhan 	size = SKB_DATA_ALIGN(size);
59366fa01ccdSSowmini Varadhan 
59376fa01ccdSSowmini Varadhan 	if (skb_pfmemalloc(skb))
59386fa01ccdSSowmini Varadhan 		gfp_mask |= __GFP_MEMALLOC;
59396fa01ccdSSowmini Varadhan 	data = kmalloc_reserve(size +
59406fa01ccdSSowmini Varadhan 			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
59416fa01ccdSSowmini Varadhan 			       gfp_mask, NUMA_NO_NODE, NULL);
59426fa01ccdSSowmini Varadhan 	if (!data)
59436fa01ccdSSowmini Varadhan 		return -ENOMEM;
59446fa01ccdSSowmini Varadhan 
59456fa01ccdSSowmini Varadhan 	size = SKB_WITH_OVERHEAD(ksize(data));
59466fa01ccdSSowmini Varadhan 
59476fa01ccdSSowmini Varadhan 	/* Copy real data, and all frags */
59486fa01ccdSSowmini Varadhan 	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
59496fa01ccdSSowmini Varadhan 	skb->len -= off;
59506fa01ccdSSowmini Varadhan 
59516fa01ccdSSowmini Varadhan 	memcpy((struct skb_shared_info *)(data + size),
59526fa01ccdSSowmini Varadhan 	       skb_shinfo(skb),
59536fa01ccdSSowmini Varadhan 	       offsetof(struct skb_shared_info,
59546fa01ccdSSowmini Varadhan 			frags[skb_shinfo(skb)->nr_frags]));
59556fa01ccdSSowmini Varadhan 	if (skb_cloned(skb)) {
59566fa01ccdSSowmini Varadhan 		/* drop the old head gracefully */
59576fa01ccdSSowmini Varadhan 		if (skb_orphan_frags(skb, gfp_mask)) {
59586fa01ccdSSowmini Varadhan 			kfree(data);
59596fa01ccdSSowmini Varadhan 			return -ENOMEM;
59606fa01ccdSSowmini Varadhan 		}
59616fa01ccdSSowmini Varadhan 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
59626fa01ccdSSowmini Varadhan 			skb_frag_ref(skb, i);
59636fa01ccdSSowmini Varadhan 		if (skb_has_frag_list(skb))
59646fa01ccdSSowmini Varadhan 			skb_clone_fraglist(skb);
59656fa01ccdSSowmini Varadhan 		skb_release_data(skb);
59666fa01ccdSSowmini Varadhan 	} else {
59676fa01ccdSSowmini Varadhan 		/* We can reuse the existing refcount - all we did was
59686fa01ccdSSowmini Varadhan 		 * relocate values.
59696fa01ccdSSowmini Varadhan 		 */
59706fa01ccdSSowmini Varadhan 		skb_free_head(skb);
59716fa01ccdSSowmini Varadhan 	}
59726fa01ccdSSowmini Varadhan 
59736fa01ccdSSowmini Varadhan 	skb->head = data;
59746fa01ccdSSowmini Varadhan 	skb->data = data;
59756fa01ccdSSowmini Varadhan 	skb->head_frag = 0;
59766fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET
59776fa01ccdSSowmini Varadhan 	skb->end = size;
59786fa01ccdSSowmini Varadhan #else
59796fa01ccdSSowmini Varadhan 	skb->end = skb->head + size;
59806fa01ccdSSowmini Varadhan #endif
59816fa01ccdSSowmini Varadhan 	skb_set_tail_pointer(skb, skb_headlen(skb));
59826fa01ccdSSowmini Varadhan 	skb_headers_offset_update(skb, 0);
59836fa01ccdSSowmini Varadhan 	skb->cloned = 0;
59846fa01ccdSSowmini Varadhan 	skb->hdr_len = 0;
59856fa01ccdSSowmini Varadhan 	skb->nohdr = 0;
59866fa01ccdSSowmini Varadhan 	atomic_set(&skb_shinfo(skb)->dataref, 1);
59876fa01ccdSSowmini Varadhan 
59886fa01ccdSSowmini Varadhan 	return 0;
59896fa01ccdSSowmini Varadhan }
59906fa01ccdSSowmini Varadhan 
59916fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
59926fa01ccdSSowmini Varadhan 
59936fa01ccdSSowmini Varadhan /* carve out the first eat bytes from skb's frag_list. May recurse into
59946fa01ccdSSowmini Varadhan  * pskb_carve()
59956fa01ccdSSowmini Varadhan  */
59966fa01ccdSSowmini Varadhan static int pskb_carve_frag_list(struct sk_buff *skb,
59976fa01ccdSSowmini Varadhan 				struct skb_shared_info *shinfo, int eat,
59986fa01ccdSSowmini Varadhan 				gfp_t gfp_mask)
59996fa01ccdSSowmini Varadhan {
60006fa01ccdSSowmini Varadhan 	struct sk_buff *list = shinfo->frag_list;
60016fa01ccdSSowmini Varadhan 	struct sk_buff *clone = NULL;
60026fa01ccdSSowmini Varadhan 	struct sk_buff *insp = NULL;
60036fa01ccdSSowmini Varadhan 
60046fa01ccdSSowmini Varadhan 	do {
60056fa01ccdSSowmini Varadhan 		if (!list) {
60066fa01ccdSSowmini Varadhan 			pr_err("Not enough bytes to eat. Want %d\n", eat);
60076fa01ccdSSowmini Varadhan 			return -EFAULT;
60086fa01ccdSSowmini Varadhan 		}
60096fa01ccdSSowmini Varadhan 		if (list->len <= eat) {
60106fa01ccdSSowmini Varadhan 			/* Eaten as whole. */
60116fa01ccdSSowmini Varadhan 			eat -= list->len;
60126fa01ccdSSowmini Varadhan 			list = list->next;
60136fa01ccdSSowmini Varadhan 			insp = list;
60146fa01ccdSSowmini Varadhan 		} else {
60156fa01ccdSSowmini Varadhan 			/* Eaten partially. */
60166fa01ccdSSowmini Varadhan 			if (skb_shared(list)) {
60176fa01ccdSSowmini Varadhan 				clone = skb_clone(list, gfp_mask);
60186fa01ccdSSowmini Varadhan 				if (!clone)
60196fa01ccdSSowmini Varadhan 					return -ENOMEM;
60206fa01ccdSSowmini Varadhan 				insp = list->next;
60216fa01ccdSSowmini Varadhan 				list = clone;
60226fa01ccdSSowmini Varadhan 			} else {
60236fa01ccdSSowmini Varadhan 				/* This may be pulled without problems. */
60246fa01ccdSSowmini Varadhan 				insp = list;
60256fa01ccdSSowmini Varadhan 			}
60266fa01ccdSSowmini Varadhan 			if (pskb_carve(list, eat, gfp_mask) < 0) {
60276fa01ccdSSowmini Varadhan 				kfree_skb(clone);
60286fa01ccdSSowmini Varadhan 				return -ENOMEM;
60296fa01ccdSSowmini Varadhan 			}
60306fa01ccdSSowmini Varadhan 			break;
60316fa01ccdSSowmini Varadhan 		}
60326fa01ccdSSowmini Varadhan 	} while (eat);
60336fa01ccdSSowmini Varadhan 
60346fa01ccdSSowmini Varadhan 	/* Free pulled out fragments. */
60356fa01ccdSSowmini Varadhan 	while ((list = shinfo->frag_list) != insp) {
60366fa01ccdSSowmini Varadhan 		shinfo->frag_list = list->next;
60376fa01ccdSSowmini Varadhan 		kfree_skb(list);
60386fa01ccdSSowmini Varadhan 	}
60396fa01ccdSSowmini Varadhan 	/* And insert new clone at head. */
60406fa01ccdSSowmini Varadhan 	if (clone) {
60416fa01ccdSSowmini Varadhan 		clone->next = list;
60426fa01ccdSSowmini Varadhan 		shinfo->frag_list = clone;
60436fa01ccdSSowmini Varadhan 	}
60446fa01ccdSSowmini Varadhan 	return 0;
60456fa01ccdSSowmini Varadhan }
60466fa01ccdSSowmini Varadhan 
60476fa01ccdSSowmini Varadhan /* carve off first len bytes from skb. Split line (off) is in the
60486fa01ccdSSowmini Varadhan  * non-linear part of skb
60496fa01ccdSSowmini Varadhan  */
60506fa01ccdSSowmini Varadhan static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
60516fa01ccdSSowmini Varadhan 				       int pos, gfp_t gfp_mask)
60526fa01ccdSSowmini Varadhan {
60536fa01ccdSSowmini Varadhan 	int i, k = 0;
60546fa01ccdSSowmini Varadhan 	int size = skb_end_offset(skb);
60556fa01ccdSSowmini Varadhan 	u8 *data;
60566fa01ccdSSowmini Varadhan 	const int nfrags = skb_shinfo(skb)->nr_frags;
60576fa01ccdSSowmini Varadhan 	struct skb_shared_info *shinfo;
60586fa01ccdSSowmini Varadhan 
60596fa01ccdSSowmini Varadhan 	size = SKB_DATA_ALIGN(size);
60606fa01ccdSSowmini Varadhan 
60616fa01ccdSSowmini Varadhan 	if (skb_pfmemalloc(skb))
60626fa01ccdSSowmini Varadhan 		gfp_mask |= __GFP_MEMALLOC;
60636fa01ccdSSowmini Varadhan 	data = kmalloc_reserve(size +
60646fa01ccdSSowmini Varadhan 			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
60656fa01ccdSSowmini Varadhan 			       gfp_mask, NUMA_NO_NODE, NULL);
60666fa01ccdSSowmini Varadhan 	if (!data)
60676fa01ccdSSowmini Varadhan 		return -ENOMEM;
60686fa01ccdSSowmini Varadhan 
60696fa01ccdSSowmini Varadhan 	size = SKB_WITH_OVERHEAD(ksize(data));
60706fa01ccdSSowmini Varadhan 
60716fa01ccdSSowmini Varadhan 	memcpy((struct skb_shared_info *)(data + size),
6072e3ec1e8cSMiaohe Lin 	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
60736fa01ccdSSowmini Varadhan 	if (skb_orphan_frags(skb, gfp_mask)) {
60746fa01ccdSSowmini Varadhan 		kfree(data);
60756fa01ccdSSowmini Varadhan 		return -ENOMEM;
60766fa01ccdSSowmini Varadhan 	}
60776fa01ccdSSowmini Varadhan 	shinfo = (struct skb_shared_info *)(data + size);
60786fa01ccdSSowmini Varadhan 	for (i = 0; i < nfrags; i++) {
60796fa01ccdSSowmini Varadhan 		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
60806fa01ccdSSowmini Varadhan 
60816fa01ccdSSowmini Varadhan 		if (pos + fsize > off) {
60826fa01ccdSSowmini Varadhan 			shinfo->frags[k] = skb_shinfo(skb)->frags[i];
60836fa01ccdSSowmini Varadhan 
60846fa01ccdSSowmini Varadhan 			if (pos < off) {
60856fa01ccdSSowmini Varadhan 				/* Split frag.
60866fa01ccdSSowmini Varadhan 				 * We have two variants in this case:
60876fa01ccdSSowmini Varadhan 				 * 1. Move the whole frag to the second
60886fa01ccdSSowmini Varadhan 				 *    part, if possible. E.g. this
60896fa01ccdSSowmini Varadhan 				 *    approach is mandatory for TUX,
60906fa01ccdSSowmini Varadhan 				 *    where splitting is expensive.
60916fa01ccdSSowmini Varadhan 				 * 2. Split accurately; that is what we do here.
60926fa01ccdSSowmini Varadhan 				 */
6093b54c9d5bSJonathan Lemon 				skb_frag_off_add(&shinfo->frags[0], off - pos);
60946fa01ccdSSowmini Varadhan 				skb_frag_size_sub(&shinfo->frags[0], off - pos);
60956fa01ccdSSowmini Varadhan 			}
60966fa01ccdSSowmini Varadhan 			skb_frag_ref(skb, i);
60976fa01ccdSSowmini Varadhan 			k++;
60986fa01ccdSSowmini Varadhan 		}
60996fa01ccdSSowmini Varadhan 		pos += fsize;
61006fa01ccdSSowmini Varadhan 	}
61016fa01ccdSSowmini Varadhan 	shinfo->nr_frags = k;
61026fa01ccdSSowmini Varadhan 	if (skb_has_frag_list(skb))
61036fa01ccdSSowmini Varadhan 		skb_clone_fraglist(skb);
61046fa01ccdSSowmini Varadhan 
61056fa01ccdSSowmini Varadhan 	/* split line is in frag list */
6106eabe8618SMiaohe Lin 	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
6107eabe8618SMiaohe Lin 		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6108eabe8618SMiaohe Lin 		if (skb_has_frag_list(skb))
6109eabe8618SMiaohe Lin 			kfree_skb_list(skb_shinfo(skb)->frag_list);
6110eabe8618SMiaohe Lin 		kfree(data);
6111eabe8618SMiaohe Lin 		return -ENOMEM;
61126fa01ccdSSowmini Varadhan 	}
61136fa01ccdSSowmini Varadhan 	skb_release_data(skb);
61146fa01ccdSSowmini Varadhan 
61156fa01ccdSSowmini Varadhan 	skb->head = data;
61166fa01ccdSSowmini Varadhan 	skb->head_frag = 0;
61176fa01ccdSSowmini Varadhan 	skb->data = data;
61186fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET
61196fa01ccdSSowmini Varadhan 	skb->end = size;
61206fa01ccdSSowmini Varadhan #else
61216fa01ccdSSowmini Varadhan 	skb->end = skb->head + size;
61226fa01ccdSSowmini Varadhan #endif
61236fa01ccdSSowmini Varadhan 	skb_reset_tail_pointer(skb);
61246fa01ccdSSowmini Varadhan 	skb_headers_offset_update(skb, 0);
61256fa01ccdSSowmini Varadhan 	skb->cloned   = 0;
61266fa01ccdSSowmini Varadhan 	skb->hdr_len  = 0;
61276fa01ccdSSowmini Varadhan 	skb->nohdr    = 0;
61286fa01ccdSSowmini Varadhan 	skb->len -= off;
61296fa01ccdSSowmini Varadhan 	skb->data_len = skb->len;
61306fa01ccdSSowmini Varadhan 	atomic_set(&skb_shinfo(skb)->dataref, 1);
61316fa01ccdSSowmini Varadhan 	return 0;
61326fa01ccdSSowmini Varadhan }
61336fa01ccdSSowmini Varadhan 
61346fa01ccdSSowmini Varadhan /* remove len bytes from the beginning of the skb */
61356fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
61366fa01ccdSSowmini Varadhan {
61376fa01ccdSSowmini Varadhan 	int headlen = skb_headlen(skb);
61386fa01ccdSSowmini Varadhan 
61396fa01ccdSSowmini Varadhan 	if (len < headlen)
61406fa01ccdSSowmini Varadhan 		return pskb_carve_inside_header(skb, len, headlen, gfp);
61416fa01ccdSSowmini Varadhan 	else
61426fa01ccdSSowmini Varadhan 		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
61436fa01ccdSSowmini Varadhan }
61446fa01ccdSSowmini Varadhan 
61456fa01ccdSSowmini Varadhan /* Extract to_copy bytes starting at off from skb, and return this in
61466fa01ccdSSowmini Varadhan  * a new skb
61476fa01ccdSSowmini Varadhan  */
61486fa01ccdSSowmini Varadhan struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
61496fa01ccdSSowmini Varadhan 			     int to_copy, gfp_t gfp)
61506fa01ccdSSowmini Varadhan {
61516fa01ccdSSowmini Varadhan 	struct sk_buff  *clone = skb_clone(skb, gfp);
61526fa01ccdSSowmini Varadhan 
61536fa01ccdSSowmini Varadhan 	if (!clone)
61546fa01ccdSSowmini Varadhan 		return NULL;
61556fa01ccdSSowmini Varadhan 
61566fa01ccdSSowmini Varadhan 	if (pskb_carve(clone, off, gfp) < 0 ||
61576fa01ccdSSowmini Varadhan 	    pskb_trim(clone, to_copy)) {
61586fa01ccdSSowmini Varadhan 		kfree_skb(clone);
61596fa01ccdSSowmini Varadhan 		return NULL;
61606fa01ccdSSowmini Varadhan 	}
61616fa01ccdSSowmini Varadhan 	return clone;
61626fa01ccdSSowmini Varadhan }
61636fa01ccdSSowmini Varadhan EXPORT_SYMBOL(pskb_extract);
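
/* Sketch: carve the byte range [off, off + to_copy) out of a received
 * skb into a standalone skb, e.g. to hand a single record of a larger
 * buffer to a consumer (off and to_copy are assumed values):
 *
 *	struct sk_buff *part = pskb_extract(skb, off, to_copy, GFP_ATOMIC);
 *
 *	if (!part)
 *		return -ENOMEM;
 */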
6164c8c8b127SEric Dumazet 
6165c8c8b127SEric Dumazet /**
6166c8c8b127SEric Dumazet  * skb_condense - try to get rid of fragments/frag_list if possible
6167c8c8b127SEric Dumazet  * @skb: buffer
6168c8c8b127SEric Dumazet  *
6169c8c8b127SEric Dumazet  * Can be used to save memory before skb is added to a busy queue.
6170c8c8b127SEric Dumazet  * If the packet has bytes in frags and enough tail room in skb->head,
6171c8c8b127SEric Dumazet  * pull all of them, so that we can free the frags right now and adjust
6172c8c8b127SEric Dumazet  * truesize.
6173c8c8b127SEric Dumazet  * Notes:
6174c8c8b127SEric Dumazet  *	We do not reallocate skb->head, thus this cannot fail.
6175c8c8b127SEric Dumazet  *	Caller must re-evaluate skb->truesize if needed.
6176c8c8b127SEric Dumazet  */
6177c8c8b127SEric Dumazet void skb_condense(struct sk_buff *skb)
6178c8c8b127SEric Dumazet {
61793174fed9SEric Dumazet 	if (skb->data_len) {
61803174fed9SEric Dumazet 		if (skb->data_len > skb->end - skb->tail ||
6181c8c8b127SEric Dumazet 		    skb_cloned(skb))
6182c8c8b127SEric Dumazet 			return;
6183c8c8b127SEric Dumazet 
6184c8c8b127SEric Dumazet 		/* Nice, we can free page frag(s) right now */
6185c8c8b127SEric Dumazet 		__pskb_pull_tail(skb, skb->data_len);
61863174fed9SEric Dumazet 	}
61873174fed9SEric Dumazet 	/* At this point, skb->truesize might be overestimated,
61883174fed9SEric Dumazet 	 * because the skb had fragments, and fragments do not report
61893174fed9SEric Dumazet 	 * their truesize.
61903174fed9SEric Dumazet 	 * When we pulled their content into skb->head, the fragments
61913174fed9SEric Dumazet 	 * were freed, but __pskb_pull_tail() could not possibly
61923174fed9SEric Dumazet 	 * adjust skb->truesize, not knowing the frag truesizes.
6193c8c8b127SEric Dumazet 	 */
6194c8c8b127SEric Dumazet 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6195c8c8b127SEric Dumazet }
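
/* Typical enqueue-time use (sketch; the receive-queue accounting shown
 * is an assumed pattern): condense first, then charge the possibly
 * reduced truesize, per the note above.
 *
 *	skb_condense(skb);
 *	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 */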
6196df5042f4SFlorian Westphal 
6197df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
6198df5042f4SFlorian Westphal static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6199df5042f4SFlorian Westphal {
6200df5042f4SFlorian Westphal 	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6201df5042f4SFlorian Westphal }
6202df5042f4SFlorian Westphal 
62038b69a803SPaolo Abeni /**
62048b69a803SPaolo Abeni  * __skb_ext_alloc - allocate a new skb extensions storage
62058b69a803SPaolo Abeni  *
62064930f483SFlorian Westphal  * @flags: See kmalloc().
62074930f483SFlorian Westphal  *
62088b69a803SPaolo Abeni  * Returns the newly allocated pointer. The pointer can later be attached
62098b69a803SPaolo Abeni  * to an skb via __skb_ext_set().
62108b69a803SPaolo Abeni  * Note: the caller must handle the skb_ext as opaque data.
62118b69a803SPaolo Abeni  */
62124930f483SFlorian Westphal struct skb_ext *__skb_ext_alloc(gfp_t flags)
6213df5042f4SFlorian Westphal {
62144930f483SFlorian Westphal 	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
6215df5042f4SFlorian Westphal 
6216df5042f4SFlorian Westphal 	if (new) {
6217df5042f4SFlorian Westphal 		memset(new->offset, 0, sizeof(new->offset));
6218df5042f4SFlorian Westphal 		refcount_set(&new->refcnt, 1);
6219df5042f4SFlorian Westphal 	}
6220df5042f4SFlorian Westphal 
6221df5042f4SFlorian Westphal 	return new;
6222df5042f4SFlorian Westphal }
6223df5042f4SFlorian Westphal 
62244165079bSFlorian Westphal static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
62254165079bSFlorian Westphal 					 unsigned int old_active)
6226df5042f4SFlorian Westphal {
6227df5042f4SFlorian Westphal 	struct skb_ext *new;
6228df5042f4SFlorian Westphal 
6229df5042f4SFlorian Westphal 	if (refcount_read(&old->refcnt) == 1)
6230df5042f4SFlorian Westphal 		return old;
6231df5042f4SFlorian Westphal 
6232df5042f4SFlorian Westphal 	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
6233df5042f4SFlorian Westphal 	if (!new)
6234df5042f4SFlorian Westphal 		return NULL;
6235df5042f4SFlorian Westphal 
6236df5042f4SFlorian Westphal 	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
6237df5042f4SFlorian Westphal 	refcount_set(&new->refcnt, 1);
6238df5042f4SFlorian Westphal 
62394165079bSFlorian Westphal #ifdef CONFIG_XFRM
62404165079bSFlorian Westphal 	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
62414165079bSFlorian Westphal 		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
62424165079bSFlorian Westphal 		unsigned int i;
62434165079bSFlorian Westphal 
62444165079bSFlorian Westphal 		for (i = 0; i < sp->len; i++)
62454165079bSFlorian Westphal 			xfrm_state_hold(sp->xvec[i]);
62464165079bSFlorian Westphal 	}
62474165079bSFlorian Westphal #endif
6248df5042f4SFlorian Westphal 	__skb_ext_put(old);
6249df5042f4SFlorian Westphal 	return new;
6250df5042f4SFlorian Westphal }
6251df5042f4SFlorian Westphal 
6252df5042f4SFlorian Westphal /**
62538b69a803SPaolo Abeni  * __skb_ext_set - attach the specified extension storage to this skb
62548b69a803SPaolo Abeni  * @skb: buffer
62558b69a803SPaolo Abeni  * @id: extension id
62568b69a803SPaolo Abeni  * @ext: extension storage previously allocated via __skb_ext_alloc()
62578b69a803SPaolo Abeni  *
62588b69a803SPaolo Abeni  * Existing extensions, if any, are cleared.
62598b69a803SPaolo Abeni  *
62608b69a803SPaolo Abeni  * Returns the pointer to the extension.
62618b69a803SPaolo Abeni  */
62628b69a803SPaolo Abeni void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
62638b69a803SPaolo Abeni 		    struct skb_ext *ext)
62648b69a803SPaolo Abeni {
62658b69a803SPaolo Abeni 	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
62668b69a803SPaolo Abeni 
62678b69a803SPaolo Abeni 	skb_ext_put(skb);
62688b69a803SPaolo Abeni 	newlen = newoff + skb_ext_type_len[id];
62698b69a803SPaolo Abeni 	ext->chunks = newlen;
62708b69a803SPaolo Abeni 	ext->offset[id] = newoff;
62718b69a803SPaolo Abeni 	skb->extensions = ext;
62728b69a803SPaolo Abeni 	skb->active_extensions = 1 << id;
62738b69a803SPaolo Abeni 	return skb_ext_get_ptr(ext, id);
62748b69a803SPaolo Abeni }
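
/* Allocation and attach compose as follows (sketch; SKB_EXT_MPTCP is
 * used for illustration and only exists with CONFIG_MPTCP):
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_ATOMIC);
 *	void *opt;
 *
 *	if (!ext)
 *		return -ENOMEM;
 *	opt = __skb_ext_set(skb, SKB_EXT_MPTCP, ext);
 */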
62758b69a803SPaolo Abeni 
62768b69a803SPaolo Abeni /**
6277df5042f4SFlorian Westphal  * skb_ext_add - allocate space for given extension, COW if needed
6278df5042f4SFlorian Westphal  * @skb: buffer
6279df5042f4SFlorian Westphal  * @id: extension to allocate space for
6280df5042f4SFlorian Westphal  *
6281df5042f4SFlorian Westphal  * Allocates enough space for the given extension.
6282df5042f4SFlorian Westphal  * If the extension is already present, a pointer to that extension
6283df5042f4SFlorian Westphal  * is returned.
6284df5042f4SFlorian Westphal  *
6285df5042f4SFlorian Westphal  * If the skb was cloned, COW applies and the returned memory can be
6286df5042f4SFlorian Westphal  * modified without changing the extension space of cloned buffers.
6287df5042f4SFlorian Westphal  *
6288df5042f4SFlorian Westphal  * Returns pointer to the extension or NULL on allocation failure.
6289df5042f4SFlorian Westphal  */
6290df5042f4SFlorian Westphal void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6291df5042f4SFlorian Westphal {
6292df5042f4SFlorian Westphal 	struct skb_ext *new, *old = NULL;
6293df5042f4SFlorian Westphal 	unsigned int newlen, newoff;
6294df5042f4SFlorian Westphal 
6295df5042f4SFlorian Westphal 	if (skb->active_extensions) {
6296df5042f4SFlorian Westphal 		old = skb->extensions;
6297df5042f4SFlorian Westphal 
62984165079bSFlorian Westphal 		new = skb_ext_maybe_cow(old, skb->active_extensions);
6299df5042f4SFlorian Westphal 		if (!new)
6300df5042f4SFlorian Westphal 			return NULL;
6301df5042f4SFlorian Westphal 
6302682ec859SPaolo Abeni 		if (__skb_ext_exist(new, id))
6303df5042f4SFlorian Westphal 			goto set_active;
6304df5042f4SFlorian Westphal 
6305e94e50bdSPaolo Abeni 		newoff = new->chunks;
6306df5042f4SFlorian Westphal 	} else {
6307df5042f4SFlorian Westphal 		newoff = SKB_EXT_CHUNKSIZEOF(*new);
6308df5042f4SFlorian Westphal 
63094930f483SFlorian Westphal 		new = __skb_ext_alloc(GFP_ATOMIC);
6310df5042f4SFlorian Westphal 		if (!new)
6311df5042f4SFlorian Westphal 			return NULL;
6312df5042f4SFlorian Westphal 	}
6313df5042f4SFlorian Westphal 
6314df5042f4SFlorian Westphal 	newlen = newoff + skb_ext_type_len[id];
6315df5042f4SFlorian Westphal 	new->chunks = newlen;
6316df5042f4SFlorian Westphal 	new->offset[id] = newoff;
6317df5042f4SFlorian Westphal set_active:
6318682ec859SPaolo Abeni 	skb->extensions = new;
6319df5042f4SFlorian Westphal 	skb->active_extensions |= 1 << id;
6320df5042f4SFlorian Westphal 	return skb_ext_get_ptr(new, id);
6321df5042f4SFlorian Westphal }
6322df5042f4SFlorian Westphal EXPORT_SYMBOL(skb_ext_add);
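
/* Sketch of the common case: attach (or find) one extension and use the
 * returned storage, here the xfrm secpath (CONFIG_XFRM assumed):
 *
 *	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *
 *	if (!sp)
 *		return -ENOMEM;
 */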
6323df5042f4SFlorian Westphal 
63244165079bSFlorian Westphal #ifdef CONFIG_XFRM
63254165079bSFlorian Westphal static void skb_ext_put_sp(struct sec_path *sp)
63264165079bSFlorian Westphal {
63274165079bSFlorian Westphal 	unsigned int i;
63284165079bSFlorian Westphal 
63294165079bSFlorian Westphal 	for (i = 0; i < sp->len; i++)
63304165079bSFlorian Westphal 		xfrm_state_put(sp->xvec[i]);
63314165079bSFlorian Westphal }
63324165079bSFlorian Westphal #endif
63334165079bSFlorian Westphal 
6334df5042f4SFlorian Westphal void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6335df5042f4SFlorian Westphal {
6336df5042f4SFlorian Westphal 	struct skb_ext *ext = skb->extensions;
6337df5042f4SFlorian Westphal 
6338df5042f4SFlorian Westphal 	skb->active_extensions &= ~(1 << id);
6339df5042f4SFlorian Westphal 	if (skb->active_extensions == 0) {
6340df5042f4SFlorian Westphal 		skb->extensions = NULL;
6341df5042f4SFlorian Westphal 		__skb_ext_put(ext);
63424165079bSFlorian Westphal #ifdef CONFIG_XFRM
63434165079bSFlorian Westphal 	} else if (id == SKB_EXT_SEC_PATH &&
63444165079bSFlorian Westphal 		   refcount_read(&ext->refcnt) == 1) {
63454165079bSFlorian Westphal 		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
63464165079bSFlorian Westphal 
63474165079bSFlorian Westphal 		skb_ext_put_sp(sp);
63484165079bSFlorian Westphal 		sp->len = 0;
63494165079bSFlorian Westphal #endif
6350df5042f4SFlorian Westphal 	}
6351df5042f4SFlorian Westphal }
6352df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_del);
6353df5042f4SFlorian Westphal 
6354df5042f4SFlorian Westphal void __skb_ext_put(struct skb_ext *ext)
6355df5042f4SFlorian Westphal {
6356df5042f4SFlorian Westphal 	/* If this is the last reference, nothing can increment
6357df5042f4SFlorian Westphal 	 * it after the check passes. Avoids one atomic op.
6358df5042f4SFlorian Westphal 	 */
6359df5042f4SFlorian Westphal 	if (refcount_read(&ext->refcnt) == 1)
6360df5042f4SFlorian Westphal 		goto free_now;
6361df5042f4SFlorian Westphal 
6362df5042f4SFlorian Westphal 	if (!refcount_dec_and_test(&ext->refcnt))
6363df5042f4SFlorian Westphal 		return;
6364df5042f4SFlorian Westphal free_now:
63654165079bSFlorian Westphal #ifdef CONFIG_XFRM
63664165079bSFlorian Westphal 	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
63674165079bSFlorian Westphal 		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
63684165079bSFlorian Westphal #endif
63694165079bSFlorian Westphal 
6370df5042f4SFlorian Westphal 	kmem_cache_free(skbuff_ext_cache, ext);
6371df5042f4SFlorian Westphal }
6372df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_put);
6373df5042f4SFlorian Westphal #endif /* CONFIG_SKB_EXTENSIONS */
6374