xref: /openbmc/linux/net/core/skbuff.c (revision 2874c5fd)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *	Routines having to do with the 'struct sk_buff' memory handlers.
41da177e4SLinus Torvalds  *
5113aa838SAlan Cox  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
61da177e4SLinus Torvalds  *			Florian La Roche <rzsfl@rz.uni-sb.de>
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  *	Fixes:
91da177e4SLinus Torvalds  *		Alan Cox	:	Fixed the worst of the load
101da177e4SLinus Torvalds  *					balancer bugs.
111da177e4SLinus Torvalds  *		Dave Platt	:	Interrupt stacking fix.
121da177e4SLinus Torvalds  *	Richard Kooijman	:	Timestamp fixes.
131da177e4SLinus Torvalds  *		Alan Cox	:	Changed buffer format.
141da177e4SLinus Torvalds  *		Alan Cox	:	destructor hook for AF_UNIX etc.
151da177e4SLinus Torvalds  *		Linus Torvalds	:	Better skb_clone.
161da177e4SLinus Torvalds  *		Alan Cox	:	Added skb_copy.
171da177e4SLinus Torvalds  *		Alan Cox	:	Added all the changed routines Linus
181da177e4SLinus Torvalds  *					only put in the headers
191da177e4SLinus Torvalds  *		Ray VanTassle	:	Fixed --skb->lock in free
201da177e4SLinus Torvalds  *		Alan Cox	:	skb_copy copy arp field
211da177e4SLinus Torvalds  *		Andi Kleen	:	slabified it.
221da177e4SLinus Torvalds  *		Robert Olsson	:	Removed skb_head_pool
231da177e4SLinus Torvalds  *
241da177e4SLinus Torvalds  *	NOTE:
251da177e4SLinus Torvalds  *		The __skb_ routines should be called with interrupts
261da177e4SLinus Torvalds  *	disabled, or you better be *real* sure that the operation is atomic
271da177e4SLinus Torvalds  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
281da177e4SLinus Torvalds  *	or via disabling bottom half handlers, etc).
291da177e4SLinus Torvalds  */
301da177e4SLinus Torvalds 
311da177e4SLinus Torvalds /*
321da177e4SLinus Torvalds  *	The functions in this file will not compile correctly with gcc 2.4.x
331da177e4SLinus Torvalds  */
341da177e4SLinus Torvalds 
35e005d193SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36e005d193SJoe Perches 
371da177e4SLinus Torvalds #include <linux/module.h>
381da177e4SLinus Torvalds #include <linux/types.h>
391da177e4SLinus Torvalds #include <linux/kernel.h>
401da177e4SLinus Torvalds #include <linux/mm.h>
411da177e4SLinus Torvalds #include <linux/interrupt.h>
421da177e4SLinus Torvalds #include <linux/in.h>
431da177e4SLinus Torvalds #include <linux/inet.h>
441da177e4SLinus Torvalds #include <linux/slab.h>
45de960aa9SFlorian Westphal #include <linux/tcp.h>
46de960aa9SFlorian Westphal #include <linux/udp.h>
4790017accSMarcelo Ricardo Leitner #include <linux/sctp.h>
481da177e4SLinus Torvalds #include <linux/netdevice.h>
491da177e4SLinus Torvalds #ifdef CONFIG_NET_CLS_ACT
501da177e4SLinus Torvalds #include <net/pkt_sched.h>
511da177e4SLinus Torvalds #endif
521da177e4SLinus Torvalds #include <linux/string.h>
531da177e4SLinus Torvalds #include <linux/skbuff.h>
549c55e01cSJens Axboe #include <linux/splice.h>
551da177e4SLinus Torvalds #include <linux/cache.h>
561da177e4SLinus Torvalds #include <linux/rtnetlink.h>
571da177e4SLinus Torvalds #include <linux/init.h>
58716ea3a7SDavid Howells #include <linux/scatterlist.h>
59ac45f602SPatrick Ohly #include <linux/errqueue.h>
60268bb0ceSLinus Torvalds #include <linux/prefetch.h>
610d5501c1SVlad Yasevich #include <linux/if_vlan.h>
621da177e4SLinus Torvalds 
631da177e4SLinus Torvalds #include <net/protocol.h>
641da177e4SLinus Torvalds #include <net/dst.h>
651da177e4SLinus Torvalds #include <net/sock.h>
661da177e4SLinus Torvalds #include <net/checksum.h>
67ed1f50c3SPaul Durrant #include <net/ip6_checksum.h>
681da177e4SLinus Torvalds #include <net/xfrm.h>
691da177e4SLinus Torvalds 
707c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
71ad8d75ffSSteven Rostedt #include <trace/events/skb.h>
7251c56b00SEric Dumazet #include <linux/highmem.h>
73b245be1fSWillem de Bruijn #include <linux/capability.h>
74b245be1fSWillem de Bruijn #include <linux/user_namespace.h>
75a1f8e7f7SAl Viro 
767b7ed885SBart Van Assche #include "datagram.h"
777b7ed885SBart Van Assche 
7808009a76SAlexey Dobriyan struct kmem_cache *skbuff_head_cache __ro_after_init;
7908009a76SAlexey Dobriyan static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
80df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
81df5042f4SFlorian Westphal static struct kmem_cache *skbuff_ext_cache __ro_after_init;
82df5042f4SFlorian Westphal #endif
835f74f82eSHans Westgaard Ry int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
845f74f82eSHans Westgaard Ry EXPORT_SYMBOL(sysctl_max_skb_frags);
851da177e4SLinus Torvalds 
861da177e4SLinus Torvalds /**
87f05de73bSJean Sacren  *	skb_panic - private function for out-of-line support
881da177e4SLinus Torvalds  *	@skb:	buffer
891da177e4SLinus Torvalds  *	@sz:	size
90f05de73bSJean Sacren  *	@addr:	address
9199d5851eSJames Hogan  *	@msg:	skb_over_panic or skb_under_panic
921da177e4SLinus Torvalds  *
93f05de73bSJean Sacren  *	Out-of-line support for skb_put() and skb_push().
94f05de73bSJean Sacren  *	Called via the wrapper skb_over_panic() or skb_under_panic().
95f05de73bSJean Sacren  *	Keep out of line to prevent kernel bloat.
96f05de73bSJean Sacren  *	__builtin_return_address is not used because it is not always reliable.
971da177e4SLinus Torvalds  */
98f05de73bSJean Sacren static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
9999d5851eSJames Hogan 		      const char msg[])
1001da177e4SLinus Torvalds {
101e005d193SJoe Perches 	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
10299d5851eSJames Hogan 		 msg, addr, skb->len, sz, skb->head, skb->data,
1034305b541SArnaldo Carvalho de Melo 		 (unsigned long)skb->tail, (unsigned long)skb->end,
10426095455SPatrick McHardy 		 skb->dev ? skb->dev->name : "<NULL>");
1051da177e4SLinus Torvalds 	BUG();
1061da177e4SLinus Torvalds }
1071da177e4SLinus Torvalds 
108f05de73bSJean Sacren static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
1091da177e4SLinus Torvalds {
110f05de73bSJean Sacren 	skb_panic(skb, sz, addr, __func__);
1111da177e4SLinus Torvalds }
1121da177e4SLinus Torvalds 
113f05de73bSJean Sacren static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
114f05de73bSJean Sacren {
115f05de73bSJean Sacren 	skb_panic(skb, sz, addr, __func__);
116f05de73bSJean Sacren }
117c93bdd0eSMel Gorman 
118c93bdd0eSMel Gorman /*
119c93bdd0eSMel Gorman  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
120c93bdd0eSMel Gorman  * the caller if emergency pfmemalloc reserves are being used. If it is and
121c93bdd0eSMel Gorman  * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
122c93bdd0eSMel Gorman  * may be used. Otherwise, the packet data may be discarded until enough
123c93bdd0eSMel Gorman  * memory is free
124c93bdd0eSMel Gorman  */
125c93bdd0eSMel Gorman #define kmalloc_reserve(size, gfp, node, pfmemalloc) \
126c93bdd0eSMel Gorman 	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
12761c5e88aSstephen hemminger 
12861c5e88aSstephen hemminger static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
12961c5e88aSstephen hemminger 			       unsigned long ip, bool *pfmemalloc)
130c93bdd0eSMel Gorman {
131c93bdd0eSMel Gorman 	void *obj;
132c93bdd0eSMel Gorman 	bool ret_pfmemalloc = false;
133c93bdd0eSMel Gorman 
134c93bdd0eSMel Gorman 	/*
135c93bdd0eSMel Gorman 	 * Try a regular allocation, when that fails and we're not entitled
136c93bdd0eSMel Gorman 	 * to the reserves, fail.
137c93bdd0eSMel Gorman 	 */
138c93bdd0eSMel Gorman 	obj = kmalloc_node_track_caller(size,
139c93bdd0eSMel Gorman 					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
140c93bdd0eSMel Gorman 					node);
141c93bdd0eSMel Gorman 	if (obj || !(gfp_pfmemalloc_allowed(flags)))
142c93bdd0eSMel Gorman 		goto out;
143c93bdd0eSMel Gorman 
144c93bdd0eSMel Gorman 	/* Try again but now we are using pfmemalloc reserves */
145c93bdd0eSMel Gorman 	ret_pfmemalloc = true;
146c93bdd0eSMel Gorman 	obj = kmalloc_node_track_caller(size, flags, node);
147c93bdd0eSMel Gorman 
148c93bdd0eSMel Gorman out:
149c93bdd0eSMel Gorman 	if (pfmemalloc)
150c93bdd0eSMel Gorman 		*pfmemalloc = ret_pfmemalloc;
151c93bdd0eSMel Gorman 
152c93bdd0eSMel Gorman 	return obj;
153c93bdd0eSMel Gorman }
154c93bdd0eSMel Gorman 
1551da177e4SLinus Torvalds /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
1561da177e4SLinus Torvalds  *	'private' fields and also do memory statistics to find all the
1571da177e4SLinus Torvalds  *	[BEEP] leaks.
1581da177e4SLinus Torvalds  *
1591da177e4SLinus Torvalds  */
1601da177e4SLinus Torvalds 
1611da177e4SLinus Torvalds /**
162d179cd12SDavid S. Miller  *	__alloc_skb	-	allocate a network buffer
1631da177e4SLinus Torvalds  *	@size: size to allocate
1641da177e4SLinus Torvalds  *	@gfp_mask: allocation mask
165c93bdd0eSMel Gorman  *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
166c93bdd0eSMel Gorman  *		instead of head cache and allocate a cloned (child) skb.
167c93bdd0eSMel Gorman  *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
168c93bdd0eSMel Gorman  *		allocations in case the data is required for writeback
169b30973f8SChristoph Hellwig  *	@node: numa node to allocate memory on
1701da177e4SLinus Torvalds  *
1711da177e4SLinus Torvalds  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
17294b6042cSBen Hutchings  *	tail room of at least size bytes. The object has a reference count
17394b6042cSBen Hutchings  *	of one. The return is the buffer. On a failure the return is %NULL.
1741da177e4SLinus Torvalds  *
1751da177e4SLinus Torvalds  *	Buffers may only be allocated from interrupts using a @gfp_mask of
1761da177e4SLinus Torvalds  *	%GFP_ATOMIC.
1771da177e4SLinus Torvalds  */
178dd0fc66fSAl Viro struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
179c93bdd0eSMel Gorman 			    int flags, int node)
1801da177e4SLinus Torvalds {
181e18b890bSChristoph Lameter 	struct kmem_cache *cache;
1824947d3efSBenjamin LaHaise 	struct skb_shared_info *shinfo;
1831da177e4SLinus Torvalds 	struct sk_buff *skb;
1841da177e4SLinus Torvalds 	u8 *data;
185c93bdd0eSMel Gorman 	bool pfmemalloc;
1861da177e4SLinus Torvalds 
187c93bdd0eSMel Gorman 	cache = (flags & SKB_ALLOC_FCLONE)
188c93bdd0eSMel Gorman 		? skbuff_fclone_cache : skbuff_head_cache;
189c93bdd0eSMel Gorman 
190c93bdd0eSMel Gorman 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
191c93bdd0eSMel Gorman 		gfp_mask |= __GFP_MEMALLOC;
1928798b3fbSHerbert Xu 
1931da177e4SLinus Torvalds 	/* Get the HEAD */
194b30973f8SChristoph Hellwig 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
1951da177e4SLinus Torvalds 	if (!skb)
1961da177e4SLinus Torvalds 		goto out;
197ec7d2f2cSEric Dumazet 	prefetchw(skb);
1981da177e4SLinus Torvalds 
19987fb4b7bSEric Dumazet 	/* We do our best to align skb_shared_info on a separate cache
20087fb4b7bSEric Dumazet 	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
20187fb4b7bSEric Dumazet 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
20287fb4b7bSEric Dumazet 	 * Both skb->head and skb_shared_info are cache line aligned.
20387fb4b7bSEric Dumazet 	 */
204bc417e30STony Lindgren 	size = SKB_DATA_ALIGN(size);
20587fb4b7bSEric Dumazet 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
206c93bdd0eSMel Gorman 	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
2071da177e4SLinus Torvalds 	if (!data)
2081da177e4SLinus Torvalds 		goto nodata;
20987fb4b7bSEric Dumazet 	/* kmalloc(size) might give us more room than requested.
21087fb4b7bSEric Dumazet 	 * Put skb_shared_info exactly at the end of allocated zone,
21187fb4b7bSEric Dumazet 	 * to allow max possible filling before reallocation.
21287fb4b7bSEric Dumazet 	 */
21387fb4b7bSEric Dumazet 	size = SKB_WITH_OVERHEAD(ksize(data));
214ec7d2f2cSEric Dumazet 	prefetchw(data + size);
2151da177e4SLinus Torvalds 
216ca0605a7SArnaldo Carvalho de Melo 	/*
217c8005785SJohannes Berg 	 * Only clear those fields we need to clear, not those that we will
218c8005785SJohannes Berg 	 * actually initialise below. Hence, don't put any more fields after
219c8005785SJohannes Berg 	 * the tail pointer in struct sk_buff!
220ca0605a7SArnaldo Carvalho de Melo 	 */
221ca0605a7SArnaldo Carvalho de Melo 	memset(skb, 0, offsetof(struct sk_buff, tail));
22287fb4b7bSEric Dumazet 	/* Account for allocated memory : skb + skb->head */
22387fb4b7bSEric Dumazet 	skb->truesize = SKB_TRUESIZE(size);
224c93bdd0eSMel Gorman 	skb->pfmemalloc = pfmemalloc;
22563354797SReshetova, Elena 	refcount_set(&skb->users, 1);
2261da177e4SLinus Torvalds 	skb->head = data;
2271da177e4SLinus Torvalds 	skb->data = data;
22827a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
2294305b541SArnaldo Carvalho de Melo 	skb->end = skb->tail + size;
23035d04610SCong Wang 	skb->mac_header = (typeof(skb->mac_header))~0U;
23135d04610SCong Wang 	skb->transport_header = (typeof(skb->transport_header))~0U;
23219633e12SStephen Hemminger 
2334947d3efSBenjamin LaHaise 	/* make sure we initialize shinfo sequentially */
2344947d3efSBenjamin LaHaise 	shinfo = skb_shinfo(skb);
235ec7d2f2cSEric Dumazet 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
2364947d3efSBenjamin LaHaise 	atomic_set(&shinfo->dataref, 1);
2374947d3efSBenjamin LaHaise 
238c93bdd0eSMel Gorman 	if (flags & SKB_ALLOC_FCLONE) {
239d0bf4a9eSEric Dumazet 		struct sk_buff_fclones *fclones;
2401da177e4SLinus Torvalds 
241d0bf4a9eSEric Dumazet 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
242d0bf4a9eSEric Dumazet 
243d179cd12SDavid S. Miller 		skb->fclone = SKB_FCLONE_ORIG;
2442638595aSReshetova, Elena 		refcount_set(&fclones->fclone_ref, 1);
245d179cd12SDavid S. Miller 
2466ffe75ebSEric Dumazet 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
247d179cd12SDavid S. Miller 	}
2481da177e4SLinus Torvalds out:
2491da177e4SLinus Torvalds 	return skb;
2501da177e4SLinus Torvalds nodata:
2518798b3fbSHerbert Xu 	kmem_cache_free(cache, skb);
2521da177e4SLinus Torvalds 	skb = NULL;
2531da177e4SLinus Torvalds 	goto out;
2541da177e4SLinus Torvalds }
255b4ac530fSDavid S. Miller EXPORT_SYMBOL(__alloc_skb);
2561da177e4SLinus Torvalds 
257ba0509b6SJesper Dangaard Brouer /* Caller must provide SKB that is memset cleared */
258ba0509b6SJesper Dangaard Brouer static struct sk_buff *__build_skb_around(struct sk_buff *skb,
259ba0509b6SJesper Dangaard Brouer 					  void *data, unsigned int frag_size)
260ba0509b6SJesper Dangaard Brouer {
261ba0509b6SJesper Dangaard Brouer 	struct skb_shared_info *shinfo;
262ba0509b6SJesper Dangaard Brouer 	unsigned int size = frag_size ? : ksize(data);
263ba0509b6SJesper Dangaard Brouer 
264ba0509b6SJesper Dangaard Brouer 	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
265ba0509b6SJesper Dangaard Brouer 
266ba0509b6SJesper Dangaard Brouer 	/* Assumes caller memset cleared SKB */
267ba0509b6SJesper Dangaard Brouer 	skb->truesize = SKB_TRUESIZE(size);
268ba0509b6SJesper Dangaard Brouer 	refcount_set(&skb->users, 1);
269ba0509b6SJesper Dangaard Brouer 	skb->head = data;
270ba0509b6SJesper Dangaard Brouer 	skb->data = data;
271ba0509b6SJesper Dangaard Brouer 	skb_reset_tail_pointer(skb);
272ba0509b6SJesper Dangaard Brouer 	skb->end = skb->tail + size;
273ba0509b6SJesper Dangaard Brouer 	skb->mac_header = (typeof(skb->mac_header))~0U;
274ba0509b6SJesper Dangaard Brouer 	skb->transport_header = (typeof(skb->transport_header))~0U;
275ba0509b6SJesper Dangaard Brouer 
276ba0509b6SJesper Dangaard Brouer 	/* make sure we initialize shinfo sequentially */
277ba0509b6SJesper Dangaard Brouer 	shinfo = skb_shinfo(skb);
278ba0509b6SJesper Dangaard Brouer 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
279ba0509b6SJesper Dangaard Brouer 	atomic_set(&shinfo->dataref, 1);
280ba0509b6SJesper Dangaard Brouer 
281ba0509b6SJesper Dangaard Brouer 	return skb;
282ba0509b6SJesper Dangaard Brouer }
283ba0509b6SJesper Dangaard Brouer 
2841da177e4SLinus Torvalds /**
2852ea2f62cSEric Dumazet  * __build_skb - build a network buffer
286b2b5ce9dSEric Dumazet  * @data: data buffer provided by caller
2872ea2f62cSEric Dumazet  * @frag_size: size of data, or 0 if head was kmalloced
288b2b5ce9dSEric Dumazet  *
289b2b5ce9dSEric Dumazet  * Allocate a new &sk_buff. Caller provides space holding head and
290deceb4c0SFlorian Fainelli  * skb_shared_info. @data must have been allocated by kmalloc() only if
2912ea2f62cSEric Dumazet  * @frag_size is 0, otherwise data should come from the page allocator
2922ea2f62cSEric Dumazet  *  or vmalloc()
293b2b5ce9dSEric Dumazet  * The return is the new skb buffer.
294b2b5ce9dSEric Dumazet  * On a failure the return is %NULL, and @data is not freed.
295b2b5ce9dSEric Dumazet  * Notes :
296b2b5ce9dSEric Dumazet  *  Before IO, driver allocates only data buffer where NIC put incoming frame
297b2b5ce9dSEric Dumazet  *  Driver should add room at head (NET_SKB_PAD) and
298b2b5ce9dSEric Dumazet  *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
299b2b5ce9dSEric Dumazet  *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
300b2b5ce9dSEric Dumazet  *  before giving packet to stack.
301b2b5ce9dSEric Dumazet  *  RX rings only contains data buffers, not full skbs.
302b2b5ce9dSEric Dumazet  */
3032ea2f62cSEric Dumazet struct sk_buff *__build_skb(void *data, unsigned int frag_size)
304b2b5ce9dSEric Dumazet {
305b2b5ce9dSEric Dumazet 	struct sk_buff *skb;
306b2b5ce9dSEric Dumazet 
307b2b5ce9dSEric Dumazet 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
308ba0509b6SJesper Dangaard Brouer 	if (unlikely(!skb))
309b2b5ce9dSEric Dumazet 		return NULL;
310b2b5ce9dSEric Dumazet 
311b2b5ce9dSEric Dumazet 	memset(skb, 0, offsetof(struct sk_buff, tail));
312b2b5ce9dSEric Dumazet 
313ba0509b6SJesper Dangaard Brouer 	return __build_skb_around(skb, data, frag_size);
314b2b5ce9dSEric Dumazet }
3152ea2f62cSEric Dumazet 
3162ea2f62cSEric Dumazet /* build_skb() is wrapper over __build_skb(), that specifically
3172ea2f62cSEric Dumazet  * takes care of skb->head and skb->pfmemalloc
3182ea2f62cSEric Dumazet  * This means that if @frag_size is not zero, then @data must be backed
3192ea2f62cSEric Dumazet  * by a page fragment, not kmalloc() or vmalloc()
3202ea2f62cSEric Dumazet  */
3212ea2f62cSEric Dumazet struct sk_buff *build_skb(void *data, unsigned int frag_size)
3222ea2f62cSEric Dumazet {
3232ea2f62cSEric Dumazet 	struct sk_buff *skb = __build_skb(data, frag_size);
3242ea2f62cSEric Dumazet 
3252ea2f62cSEric Dumazet 	if (skb && frag_size) {
3262ea2f62cSEric Dumazet 		skb->head_frag = 1;
3272f064f34SMichal Hocko 		if (page_is_pfmemalloc(virt_to_head_page(data)))
3282ea2f62cSEric Dumazet 			skb->pfmemalloc = 1;
3292ea2f62cSEric Dumazet 	}
3302ea2f62cSEric Dumazet 	return skb;
3312ea2f62cSEric Dumazet }
332b2b5ce9dSEric Dumazet EXPORT_SYMBOL(build_skb);
333b2b5ce9dSEric Dumazet 
334ba0509b6SJesper Dangaard Brouer /**
335ba0509b6SJesper Dangaard Brouer  * build_skb_around - build a network buffer around provided skb
336ba0509b6SJesper Dangaard Brouer  * @skb: sk_buff provide by caller, must be memset cleared
337ba0509b6SJesper Dangaard Brouer  * @data: data buffer provided by caller
338ba0509b6SJesper Dangaard Brouer  * @frag_size: size of data, or 0 if head was kmalloced
339ba0509b6SJesper Dangaard Brouer  */
340ba0509b6SJesper Dangaard Brouer struct sk_buff *build_skb_around(struct sk_buff *skb,
341ba0509b6SJesper Dangaard Brouer 				 void *data, unsigned int frag_size)
342ba0509b6SJesper Dangaard Brouer {
343ba0509b6SJesper Dangaard Brouer 	if (unlikely(!skb))
344ba0509b6SJesper Dangaard Brouer 		return NULL;
345ba0509b6SJesper Dangaard Brouer 
346ba0509b6SJesper Dangaard Brouer 	skb = __build_skb_around(skb, data, frag_size);
347ba0509b6SJesper Dangaard Brouer 
348ba0509b6SJesper Dangaard Brouer 	if (skb && frag_size) {
349ba0509b6SJesper Dangaard Brouer 		skb->head_frag = 1;
350ba0509b6SJesper Dangaard Brouer 		if (page_is_pfmemalloc(virt_to_head_page(data)))
351ba0509b6SJesper Dangaard Brouer 			skb->pfmemalloc = 1;
352ba0509b6SJesper Dangaard Brouer 	}
353ba0509b6SJesper Dangaard Brouer 	return skb;
354ba0509b6SJesper Dangaard Brouer }
355ba0509b6SJesper Dangaard Brouer EXPORT_SYMBOL(build_skb_around);
356ba0509b6SJesper Dangaard Brouer 
357795bb1c0SJesper Dangaard Brouer #define NAPI_SKB_CACHE_SIZE	64
358795bb1c0SJesper Dangaard Brouer 
359795bb1c0SJesper Dangaard Brouer struct napi_alloc_cache {
360795bb1c0SJesper Dangaard Brouer 	struct page_frag_cache page;
361e0d7924aSAlexey Dobriyan 	unsigned int skb_count;
362795bb1c0SJesper Dangaard Brouer 	void *skb_cache[NAPI_SKB_CACHE_SIZE];
363795bb1c0SJesper Dangaard Brouer };
364795bb1c0SJesper Dangaard Brouer 
365b63ae8caSAlexander Duyck static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
366795bb1c0SJesper Dangaard Brouer static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
367ffde7328SAlexander Duyck 
368ffde7328SAlexander Duyck static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
369ffde7328SAlexander Duyck {
370b63ae8caSAlexander Duyck 	struct page_frag_cache *nc;
371ffde7328SAlexander Duyck 	unsigned long flags;
372ffde7328SAlexander Duyck 	void *data;
373ffde7328SAlexander Duyck 
374ffde7328SAlexander Duyck 	local_irq_save(flags);
3759451980aSAlexander Duyck 	nc = this_cpu_ptr(&netdev_alloc_cache);
3768c2dd3e4SAlexander Duyck 	data = page_frag_alloc(nc, fragsz, gfp_mask);
3776f532612SEric Dumazet 	local_irq_restore(flags);
3786f532612SEric Dumazet 	return data;
3796f532612SEric Dumazet }
380c93bdd0eSMel Gorman 
381c93bdd0eSMel Gorman /**
382c93bdd0eSMel Gorman  * netdev_alloc_frag - allocate a page fragment
383c93bdd0eSMel Gorman  * @fragsz: fragment size
384c93bdd0eSMel Gorman  *
385c93bdd0eSMel Gorman  * Allocates a frag from a page for receive buffer.
386c93bdd0eSMel Gorman  * Uses GFP_ATOMIC allocations.
387c93bdd0eSMel Gorman  */
388c93bdd0eSMel Gorman void *netdev_alloc_frag(unsigned int fragsz)
389c93bdd0eSMel Gorman {
3903bed3cc4SAlexander Duyck 	fragsz = SKB_DATA_ALIGN(fragsz);
3913bed3cc4SAlexander Duyck 
392453f85d4SMel Gorman 	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
393c93bdd0eSMel Gorman }
3946f532612SEric Dumazet EXPORT_SYMBOL(netdev_alloc_frag);
3956f532612SEric Dumazet 
396ffde7328SAlexander Duyck static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
397ffde7328SAlexander Duyck {
398795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
3999451980aSAlexander Duyck 
4008c2dd3e4SAlexander Duyck 	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
401ffde7328SAlexander Duyck }
402ffde7328SAlexander Duyck 
403ffde7328SAlexander Duyck void *napi_alloc_frag(unsigned int fragsz)
404ffde7328SAlexander Duyck {
4053bed3cc4SAlexander Duyck 	fragsz = SKB_DATA_ALIGN(fragsz);
4063bed3cc4SAlexander Duyck 
407453f85d4SMel Gorman 	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
408ffde7328SAlexander Duyck }
409ffde7328SAlexander Duyck EXPORT_SYMBOL(napi_alloc_frag);
410ffde7328SAlexander Duyck 
4116f532612SEric Dumazet /**
412fd11a83dSAlexander Duyck  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
413fd11a83dSAlexander Duyck  *	@dev: network device to receive on
414d7499160SMasanari Iida  *	@len: length to allocate
415fd11a83dSAlexander Duyck  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
416fd11a83dSAlexander Duyck  *
417fd11a83dSAlexander Duyck  *	Allocate a new &sk_buff and assign it a usage count of one. The
418fd11a83dSAlexander Duyck  *	buffer has NET_SKB_PAD headroom built in. Users should allocate
419fd11a83dSAlexander Duyck  *	the headroom they think they need without accounting for the
420fd11a83dSAlexander Duyck  *	built in space. The built in space is used for optimisations.
421fd11a83dSAlexander Duyck  *
422fd11a83dSAlexander Duyck  *	%NULL is returned if there is no free memory.
423fd11a83dSAlexander Duyck  */
4249451980aSAlexander Duyck struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
4259451980aSAlexander Duyck 				   gfp_t gfp_mask)
426fd11a83dSAlexander Duyck {
427b63ae8caSAlexander Duyck 	struct page_frag_cache *nc;
4289451980aSAlexander Duyck 	unsigned long flags;
429fd11a83dSAlexander Duyck 	struct sk_buff *skb;
4309451980aSAlexander Duyck 	bool pfmemalloc;
4319451980aSAlexander Duyck 	void *data;
432fd11a83dSAlexander Duyck 
4339451980aSAlexander Duyck 	len += NET_SKB_PAD;
434fd11a83dSAlexander Duyck 
4359451980aSAlexander Duyck 	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
436d0164adcSMel Gorman 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
437a080e7bdSAlexander Duyck 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
438a080e7bdSAlexander Duyck 		if (!skb)
439a080e7bdSAlexander Duyck 			goto skb_fail;
440a080e7bdSAlexander Duyck 		goto skb_success;
441a080e7bdSAlexander Duyck 	}
4429451980aSAlexander Duyck 
4439451980aSAlexander Duyck 	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4449451980aSAlexander Duyck 	len = SKB_DATA_ALIGN(len);
4459451980aSAlexander Duyck 
4469451980aSAlexander Duyck 	if (sk_memalloc_socks())
4479451980aSAlexander Duyck 		gfp_mask |= __GFP_MEMALLOC;
4489451980aSAlexander Duyck 
4499451980aSAlexander Duyck 	local_irq_save(flags);
4509451980aSAlexander Duyck 
4519451980aSAlexander Duyck 	nc = this_cpu_ptr(&netdev_alloc_cache);
4528c2dd3e4SAlexander Duyck 	data = page_frag_alloc(nc, len, gfp_mask);
4539451980aSAlexander Duyck 	pfmemalloc = nc->pfmemalloc;
4549451980aSAlexander Duyck 
4559451980aSAlexander Duyck 	local_irq_restore(flags);
4569451980aSAlexander Duyck 
4579451980aSAlexander Duyck 	if (unlikely(!data))
4589451980aSAlexander Duyck 		return NULL;
4599451980aSAlexander Duyck 
4609451980aSAlexander Duyck 	skb = __build_skb(data, len);
4619451980aSAlexander Duyck 	if (unlikely(!skb)) {
462181edb2bSAlexander Duyck 		skb_free_frag(data);
4639451980aSAlexander Duyck 		return NULL;
4649451980aSAlexander Duyck 	}
4659451980aSAlexander Duyck 
4669451980aSAlexander Duyck 	/* use OR instead of assignment to avoid clearing of bits in mask */
4679451980aSAlexander Duyck 	if (pfmemalloc)
4689451980aSAlexander Duyck 		skb->pfmemalloc = 1;
4699451980aSAlexander Duyck 	skb->head_frag = 1;
4709451980aSAlexander Duyck 
471a080e7bdSAlexander Duyck skb_success:
4728af27456SChristoph Hellwig 	skb_reserve(skb, NET_SKB_PAD);
4737b2e497aSChristoph Hellwig 	skb->dev = dev;
474fd11a83dSAlexander Duyck 
475a080e7bdSAlexander Duyck skb_fail:
4768af27456SChristoph Hellwig 	return skb;
4778af27456SChristoph Hellwig }
478b4ac530fSDavid S. Miller EXPORT_SYMBOL(__netdev_alloc_skb);
4791da177e4SLinus Torvalds 
480fd11a83dSAlexander Duyck /**
481fd11a83dSAlexander Duyck  *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
482fd11a83dSAlexander Duyck  *	@napi: napi instance this buffer was allocated for
483d7499160SMasanari Iida  *	@len: length to allocate
484fd11a83dSAlexander Duyck  *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
485fd11a83dSAlexander Duyck  *
486fd11a83dSAlexander Duyck  *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
487fd11a83dSAlexander Duyck  *	attempt to allocate the head from a special reserved region used
488fd11a83dSAlexander Duyck  *	only for NAPI Rx allocation.  By doing this we can save several
489fd11a83dSAlexander Duyck  *	CPU cycles by avoiding having to disable and re-enable IRQs.
490fd11a83dSAlexander Duyck  *
491fd11a83dSAlexander Duyck  *	%NULL is returned if there is no free memory.
492fd11a83dSAlexander Duyck  */
4939451980aSAlexander Duyck struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
4949451980aSAlexander Duyck 				 gfp_t gfp_mask)
495fd11a83dSAlexander Duyck {
496795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
497fd11a83dSAlexander Duyck 	struct sk_buff *skb;
4989451980aSAlexander Duyck 	void *data;
499fd11a83dSAlexander Duyck 
5009451980aSAlexander Duyck 	len += NET_SKB_PAD + NET_IP_ALIGN;
501fd11a83dSAlexander Duyck 
5029451980aSAlexander Duyck 	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
503d0164adcSMel Gorman 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
504a080e7bdSAlexander Duyck 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
505a080e7bdSAlexander Duyck 		if (!skb)
506a080e7bdSAlexander Duyck 			goto skb_fail;
507a080e7bdSAlexander Duyck 		goto skb_success;
508a080e7bdSAlexander Duyck 	}
5099451980aSAlexander Duyck 
5109451980aSAlexander Duyck 	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5119451980aSAlexander Duyck 	len = SKB_DATA_ALIGN(len);
5129451980aSAlexander Duyck 
5139451980aSAlexander Duyck 	if (sk_memalloc_socks())
5149451980aSAlexander Duyck 		gfp_mask |= __GFP_MEMALLOC;
5159451980aSAlexander Duyck 
5168c2dd3e4SAlexander Duyck 	data = page_frag_alloc(&nc->page, len, gfp_mask);
5179451980aSAlexander Duyck 	if (unlikely(!data))
5189451980aSAlexander Duyck 		return NULL;
5199451980aSAlexander Duyck 
5209451980aSAlexander Duyck 	skb = __build_skb(data, len);
5219451980aSAlexander Duyck 	if (unlikely(!skb)) {
522181edb2bSAlexander Duyck 		skb_free_frag(data);
5239451980aSAlexander Duyck 		return NULL;
5249451980aSAlexander Duyck 	}
5259451980aSAlexander Duyck 
5269451980aSAlexander Duyck 	/* use OR instead of assignment to avoid clearing of bits in mask */
527795bb1c0SJesper Dangaard Brouer 	if (nc->page.pfmemalloc)
5289451980aSAlexander Duyck 		skb->pfmemalloc = 1;
5299451980aSAlexander Duyck 	skb->head_frag = 1;
5309451980aSAlexander Duyck 
531a080e7bdSAlexander Duyck skb_success:
532fd11a83dSAlexander Duyck 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
533fd11a83dSAlexander Duyck 	skb->dev = napi->dev;
534fd11a83dSAlexander Duyck 
535a080e7bdSAlexander Duyck skb_fail:
536fd11a83dSAlexander Duyck 	return skb;
537fd11a83dSAlexander Duyck }
538fd11a83dSAlexander Duyck EXPORT_SYMBOL(__napi_alloc_skb);
539fd11a83dSAlexander Duyck 
540654bed16SPeter Zijlstra void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
54150269e19SEric Dumazet 		     int size, unsigned int truesize)
542654bed16SPeter Zijlstra {
543654bed16SPeter Zijlstra 	skb_fill_page_desc(skb, i, page, off, size);
544654bed16SPeter Zijlstra 	skb->len += size;
545654bed16SPeter Zijlstra 	skb->data_len += size;
54650269e19SEric Dumazet 	skb->truesize += truesize;
547654bed16SPeter Zijlstra }
548654bed16SPeter Zijlstra EXPORT_SYMBOL(skb_add_rx_frag);
549654bed16SPeter Zijlstra 
550f8e617e1SJason Wang void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
551f8e617e1SJason Wang 			  unsigned int truesize)
552f8e617e1SJason Wang {
553f8e617e1SJason Wang 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
554f8e617e1SJason Wang 
555f8e617e1SJason Wang 	skb_frag_size_add(frag, size);
556f8e617e1SJason Wang 	skb->len += size;
557f8e617e1SJason Wang 	skb->data_len += size;
558f8e617e1SJason Wang 	skb->truesize += truesize;
559f8e617e1SJason Wang }
560f8e617e1SJason Wang EXPORT_SYMBOL(skb_coalesce_rx_frag);
561f8e617e1SJason Wang 
56227b437c8SHerbert Xu static void skb_drop_list(struct sk_buff **listp)
5631da177e4SLinus Torvalds {
564bd8a7036SEric Dumazet 	kfree_skb_list(*listp);
56527b437c8SHerbert Xu 	*listp = NULL;
5661da177e4SLinus Torvalds }
5671da177e4SLinus Torvalds 
56827b437c8SHerbert Xu static inline void skb_drop_fraglist(struct sk_buff *skb)
56927b437c8SHerbert Xu {
57027b437c8SHerbert Xu 	skb_drop_list(&skb_shinfo(skb)->frag_list);
57127b437c8SHerbert Xu }
57227b437c8SHerbert Xu 
5731da177e4SLinus Torvalds static void skb_clone_fraglist(struct sk_buff *skb)
5741da177e4SLinus Torvalds {
5751da177e4SLinus Torvalds 	struct sk_buff *list;
5761da177e4SLinus Torvalds 
577fbb398a8SDavid S. Miller 	skb_walk_frags(skb, list)
5781da177e4SLinus Torvalds 		skb_get(list);
5791da177e4SLinus Torvalds }
5801da177e4SLinus Torvalds 
581d3836f21SEric Dumazet static void skb_free_head(struct sk_buff *skb)
582d3836f21SEric Dumazet {
583181edb2bSAlexander Duyck 	unsigned char *head = skb->head;
584181edb2bSAlexander Duyck 
585d3836f21SEric Dumazet 	if (skb->head_frag)
586181edb2bSAlexander Duyck 		skb_free_frag(head);
587d3836f21SEric Dumazet 	else
588181edb2bSAlexander Duyck 		kfree(head);
589d3836f21SEric Dumazet }
590d3836f21SEric Dumazet 
5915bba1712SAdrian Bunk static void skb_release_data(struct sk_buff *skb)
5921da177e4SLinus Torvalds {
593ff04a771SEric Dumazet 	struct skb_shared_info *shinfo = skb_shinfo(skb);
5941da177e4SLinus Torvalds 	int i;
595ff04a771SEric Dumazet 
596ff04a771SEric Dumazet 	if (skb->cloned &&
597ff04a771SEric Dumazet 	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
598ff04a771SEric Dumazet 			      &shinfo->dataref))
599ff04a771SEric Dumazet 		return;
600ff04a771SEric Dumazet 
601ff04a771SEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++)
602ff04a771SEric Dumazet 		__skb_frag_unref(&shinfo->frags[i]);
6031da177e4SLinus Torvalds 
604ff04a771SEric Dumazet 	if (shinfo->frag_list)
605ff04a771SEric Dumazet 		kfree_skb_list(shinfo->frag_list);
6061da177e4SLinus Torvalds 
6071f8b977aSWillem de Bruijn 	skb_zcopy_clear(skb, true);
608d3836f21SEric Dumazet 	skb_free_head(skb);
6091da177e4SLinus Torvalds }
6101da177e4SLinus Torvalds 
6111da177e4SLinus Torvalds /*
6121da177e4SLinus Torvalds  *	Free an skbuff by memory without cleaning the state.
6131da177e4SLinus Torvalds  */
6142d4baff8SHerbert Xu static void kfree_skbmem(struct sk_buff *skb)
6151da177e4SLinus Torvalds {
616d0bf4a9eSEric Dumazet 	struct sk_buff_fclones *fclones;
617d179cd12SDavid S. Miller 
618d179cd12SDavid S. Miller 	switch (skb->fclone) {
619d179cd12SDavid S. Miller 	case SKB_FCLONE_UNAVAILABLE:
6201da177e4SLinus Torvalds 		kmem_cache_free(skbuff_head_cache, skb);
6216ffe75ebSEric Dumazet 		return;
622d179cd12SDavid S. Miller 
623d179cd12SDavid S. Miller 	case SKB_FCLONE_ORIG:
624d0bf4a9eSEric Dumazet 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
6256ffe75ebSEric Dumazet 
6266ffe75ebSEric Dumazet 		/* We usually free the clone (TX completion) before original skb
6276ffe75ebSEric Dumazet 		 * This test would have no chance to be true for the clone,
6286ffe75ebSEric Dumazet 		 * while here, branch prediction will be good.
6296ffe75ebSEric Dumazet 		 */
6302638595aSReshetova, Elena 		if (refcount_read(&fclones->fclone_ref) == 1)
6316ffe75ebSEric Dumazet 			goto fastpath;
632d179cd12SDavid S. Miller 		break;
633d179cd12SDavid S. Miller 
6346ffe75ebSEric Dumazet 	default: /* SKB_FCLONE_CLONE */
635d0bf4a9eSEric Dumazet 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
636d179cd12SDavid S. Miller 		break;
6373ff50b79SStephen Hemminger 	}
6382638595aSReshetova, Elena 	if (!refcount_dec_and_test(&fclones->fclone_ref))
6396ffe75ebSEric Dumazet 		return;
6406ffe75ebSEric Dumazet fastpath:
6416ffe75ebSEric Dumazet 	kmem_cache_free(skbuff_fclone_cache, fclones);
6421da177e4SLinus Torvalds }
6431da177e4SLinus Torvalds 
6440a463c78SPaolo Abeni void skb_release_head_state(struct sk_buff *skb)
6451da177e4SLinus Torvalds {
646adf30907SEric Dumazet 	skb_dst_drop(skb);
6471da177e4SLinus Torvalds 	if (skb->destructor) {
6489c2b3328SStephen Hemminger 		WARN_ON(in_irq());
6491da177e4SLinus Torvalds 		skb->destructor(skb);
6501da177e4SLinus Torvalds 	}
651a3bf7ae9SIgor Maravić #if IS_ENABLED(CONFIG_NF_CONNTRACK)
652cb9c6836SFlorian Westphal 	nf_conntrack_put(skb_nfct(skb));
6532fc72c7bSKOVACS Krisztian #endif
654df5042f4SFlorian Westphal 	skb_ext_put(skb);
65504a4bb55SLennert Buytenhek }
65604a4bb55SLennert Buytenhek 
65704a4bb55SLennert Buytenhek /* Free everything but the sk_buff shell. */
65804a4bb55SLennert Buytenhek static void skb_release_all(struct sk_buff *skb)
65904a4bb55SLennert Buytenhek {
66004a4bb55SLennert Buytenhek 	skb_release_head_state(skb);
661a28b1b90SFlorian Westphal 	if (likely(skb->head))
6622d4baff8SHerbert Xu 		skb_release_data(skb);
6632d4baff8SHerbert Xu }
6641da177e4SLinus Torvalds 
6652d4baff8SHerbert Xu /**
6662d4baff8SHerbert Xu  *	__kfree_skb - private function
6672d4baff8SHerbert Xu  *	@skb: buffer
6682d4baff8SHerbert Xu  *
6692d4baff8SHerbert Xu  *	Free an sk_buff. Release anything attached to the buffer.
6702d4baff8SHerbert Xu  *	Clean the state. This is an internal helper function. Users should
6712d4baff8SHerbert Xu  *	always call kfree_skb
6722d4baff8SHerbert Xu  */
6732d4baff8SHerbert Xu 
6742d4baff8SHerbert Xu void __kfree_skb(struct sk_buff *skb)
6752d4baff8SHerbert Xu {
6762d4baff8SHerbert Xu 	skb_release_all(skb);
6771da177e4SLinus Torvalds 	kfree_skbmem(skb);
6781da177e4SLinus Torvalds }
679b4ac530fSDavid S. Miller EXPORT_SYMBOL(__kfree_skb);
6801da177e4SLinus Torvalds 
6811da177e4SLinus Torvalds /**
682231d06aeSJörn Engel  *	kfree_skb - free an sk_buff
683231d06aeSJörn Engel  *	@skb: buffer to free
684231d06aeSJörn Engel  *
685231d06aeSJörn Engel  *	Drop a reference to the buffer and free it if the usage count has
686231d06aeSJörn Engel  *	hit zero.
687231d06aeSJörn Engel  */
688231d06aeSJörn Engel void kfree_skb(struct sk_buff *skb)
689231d06aeSJörn Engel {
6903889a803SPaolo Abeni 	if (!skb_unref(skb))
691231d06aeSJörn Engel 		return;
6923889a803SPaolo Abeni 
693ead2ceb0SNeil Horman 	trace_kfree_skb(skb, __builtin_return_address(0));
694231d06aeSJörn Engel 	__kfree_skb(skb);
695231d06aeSJörn Engel }
696b4ac530fSDavid S. Miller EXPORT_SYMBOL(kfree_skb);
697231d06aeSJörn Engel 
698bd8a7036SEric Dumazet void kfree_skb_list(struct sk_buff *segs)
699bd8a7036SEric Dumazet {
700bd8a7036SEric Dumazet 	while (segs) {
701bd8a7036SEric Dumazet 		struct sk_buff *next = segs->next;
702bd8a7036SEric Dumazet 
703bd8a7036SEric Dumazet 		kfree_skb(segs);
704bd8a7036SEric Dumazet 		segs = next;
705bd8a7036SEric Dumazet 	}
706bd8a7036SEric Dumazet }
707bd8a7036SEric Dumazet EXPORT_SYMBOL(kfree_skb_list);
708bd8a7036SEric Dumazet 
709d1a203eaSStephen Hemminger /**
71025121173SMichael S. Tsirkin  *	skb_tx_error - report an sk_buff xmit error
71125121173SMichael S. Tsirkin  *	@skb: buffer that triggered an error
71225121173SMichael S. Tsirkin  *
71325121173SMichael S. Tsirkin  *	Report xmit error if a device callback is tracking this skb.
71425121173SMichael S. Tsirkin  *	skb must be freed afterwards.
71525121173SMichael S. Tsirkin  */
71625121173SMichael S. Tsirkin void skb_tx_error(struct sk_buff *skb)
71725121173SMichael S. Tsirkin {
7181f8b977aSWillem de Bruijn 	skb_zcopy_clear(skb, true);
71925121173SMichael S. Tsirkin }
72025121173SMichael S. Tsirkin EXPORT_SYMBOL(skb_tx_error);
72125121173SMichael S. Tsirkin 
72225121173SMichael S. Tsirkin /**
723ead2ceb0SNeil Horman  *	consume_skb - free an skbuff
724ead2ceb0SNeil Horman  *	@skb: buffer to free
725ead2ceb0SNeil Horman  *
726ead2ceb0SNeil Horman  *	Drop a ref to the buffer and free it if the usage count has hit zero
727ead2ceb0SNeil Horman  *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
728ead2ceb0SNeil Horman  *	is being dropped after a failure and notes that
729ead2ceb0SNeil Horman  */
730ead2ceb0SNeil Horman void consume_skb(struct sk_buff *skb)
731ead2ceb0SNeil Horman {
7323889a803SPaolo Abeni 	if (!skb_unref(skb))
733ead2ceb0SNeil Horman 		return;
7343889a803SPaolo Abeni 
73507dc22e7SKoki Sanagi 	trace_consume_skb(skb);
736ead2ceb0SNeil Horman 	__kfree_skb(skb);
737ead2ceb0SNeil Horman }
738ead2ceb0SNeil Horman EXPORT_SYMBOL(consume_skb);
739ead2ceb0SNeil Horman 
7400a463c78SPaolo Abeni /**
7410a463c78SPaolo Abeni  *	consume_stateless_skb - free an skbuff, assuming it is stateless
7420a463c78SPaolo Abeni  *	@skb: buffer to free
7430a463c78SPaolo Abeni  *
744ca2c1418SPaolo Abeni  *	Alike consume_skb(), but this variant assumes that this is the last
745ca2c1418SPaolo Abeni  *	skb reference and all the head states have been already dropped
7460a463c78SPaolo Abeni  */
747ca2c1418SPaolo Abeni void __consume_stateless_skb(struct sk_buff *skb)
7480a463c78SPaolo Abeni {
7490a463c78SPaolo Abeni 	trace_consume_skb(skb);
7500a463c78SPaolo Abeni 	skb_release_data(skb);
7510a463c78SPaolo Abeni 	kfree_skbmem(skb);
7520a463c78SPaolo Abeni }
7530a463c78SPaolo Abeni 
754795bb1c0SJesper Dangaard Brouer void __kfree_skb_flush(void)
755795bb1c0SJesper Dangaard Brouer {
756795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
757795bb1c0SJesper Dangaard Brouer 
758795bb1c0SJesper Dangaard Brouer 	/* flush skb_cache if containing objects */
759795bb1c0SJesper Dangaard Brouer 	if (nc->skb_count) {
760795bb1c0SJesper Dangaard Brouer 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
761795bb1c0SJesper Dangaard Brouer 				     nc->skb_cache);
762795bb1c0SJesper Dangaard Brouer 		nc->skb_count = 0;
763795bb1c0SJesper Dangaard Brouer 	}
764795bb1c0SJesper Dangaard Brouer }
765795bb1c0SJesper Dangaard Brouer 
76615fad714SJesper Dangaard Brouer static inline void _kfree_skb_defer(struct sk_buff *skb)
767795bb1c0SJesper Dangaard Brouer {
768795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
769795bb1c0SJesper Dangaard Brouer 
770795bb1c0SJesper Dangaard Brouer 	/* drop skb->head and call any destructors for packet */
771795bb1c0SJesper Dangaard Brouer 	skb_release_all(skb);
772795bb1c0SJesper Dangaard Brouer 
773795bb1c0SJesper Dangaard Brouer 	/* record skb to CPU local list */
774795bb1c0SJesper Dangaard Brouer 	nc->skb_cache[nc->skb_count++] = skb;
775795bb1c0SJesper Dangaard Brouer 
776795bb1c0SJesper Dangaard Brouer #ifdef CONFIG_SLUB
777795bb1c0SJesper Dangaard Brouer 	/* SLUB writes into objects when freeing */
778795bb1c0SJesper Dangaard Brouer 	prefetchw(skb);
779795bb1c0SJesper Dangaard Brouer #endif
780795bb1c0SJesper Dangaard Brouer 
781795bb1c0SJesper Dangaard Brouer 	/* flush skb_cache if it is filled */
782795bb1c0SJesper Dangaard Brouer 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
783795bb1c0SJesper Dangaard Brouer 		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
784795bb1c0SJesper Dangaard Brouer 				     nc->skb_cache);
785795bb1c0SJesper Dangaard Brouer 		nc->skb_count = 0;
786795bb1c0SJesper Dangaard Brouer 	}
787795bb1c0SJesper Dangaard Brouer }
78815fad714SJesper Dangaard Brouer void __kfree_skb_defer(struct sk_buff *skb)
78915fad714SJesper Dangaard Brouer {
79015fad714SJesper Dangaard Brouer 	_kfree_skb_defer(skb);
79115fad714SJesper Dangaard Brouer }
792795bb1c0SJesper Dangaard Brouer 
793795bb1c0SJesper Dangaard Brouer void napi_consume_skb(struct sk_buff *skb, int budget)
794795bb1c0SJesper Dangaard Brouer {
795795bb1c0SJesper Dangaard Brouer 	if (unlikely(!skb))
796795bb1c0SJesper Dangaard Brouer 		return;
797795bb1c0SJesper Dangaard Brouer 
798885eb0a5SJesper Dangaard Brouer 	/* Zero budget indicate non-NAPI context called us, like netpoll */
799795bb1c0SJesper Dangaard Brouer 	if (unlikely(!budget)) {
800885eb0a5SJesper Dangaard Brouer 		dev_consume_skb_any(skb);
801795bb1c0SJesper Dangaard Brouer 		return;
802795bb1c0SJesper Dangaard Brouer 	}
803795bb1c0SJesper Dangaard Brouer 
8047608894eSPaolo Abeni 	if (!skb_unref(skb))
805795bb1c0SJesper Dangaard Brouer 		return;
8067608894eSPaolo Abeni 
807795bb1c0SJesper Dangaard Brouer 	/* if reaching here SKB is ready to free */
808795bb1c0SJesper Dangaard Brouer 	trace_consume_skb(skb);
809795bb1c0SJesper Dangaard Brouer 
810795bb1c0SJesper Dangaard Brouer 	/* if SKB is a clone, don't handle this case */
811abbdb5a7SEric Dumazet 	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
812795bb1c0SJesper Dangaard Brouer 		__kfree_skb(skb);
813795bb1c0SJesper Dangaard Brouer 		return;
814795bb1c0SJesper Dangaard Brouer 	}
815795bb1c0SJesper Dangaard Brouer 
81615fad714SJesper Dangaard Brouer 	_kfree_skb_defer(skb);
817795bb1c0SJesper Dangaard Brouer }
818795bb1c0SJesper Dangaard Brouer EXPORT_SYMBOL(napi_consume_skb);
819795bb1c0SJesper Dangaard Brouer 
820b1937227SEric Dumazet /* Make sure a field is enclosed inside headers_start/headers_end section */
821b1937227SEric Dumazet #define CHECK_SKB_FIELD(field) \
822b1937227SEric Dumazet 	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
823b1937227SEric Dumazet 		     offsetof(struct sk_buff, headers_start));	\
824b1937227SEric Dumazet 	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
825b1937227SEric Dumazet 		     offsetof(struct sk_buff, headers_end));	\
826b1937227SEric Dumazet 
827dec18810SHerbert Xu static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
828dec18810SHerbert Xu {
829dec18810SHerbert Xu 	new->tstamp		= old->tstamp;
830b1937227SEric Dumazet 	/* We do not copy old->sk */
831dec18810SHerbert Xu 	new->dev		= old->dev;
832b1937227SEric Dumazet 	memcpy(new->cb, old->cb, sizeof(old->cb));
8337fee226aSEric Dumazet 	skb_dst_copy(new, old);
834df5042f4SFlorian Westphal 	__skb_ext_copy(new, old);
835b1937227SEric Dumazet 	__nf_copy(new, old, false);
8366aa895b0SPatrick McHardy 
837b1937227SEric Dumazet 	/* Note : this field could be in headers_start/headers_end section
838b1937227SEric Dumazet 	 * It is not yet because we do not want to have a 16 bit hole
839b1937227SEric Dumazet 	 */
840b1937227SEric Dumazet 	new->queue_mapping = old->queue_mapping;
84106021292SEliezer Tamir 
842b1937227SEric Dumazet 	memcpy(&new->headers_start, &old->headers_start,
843b1937227SEric Dumazet 	       offsetof(struct sk_buff, headers_end) -
844b1937227SEric Dumazet 	       offsetof(struct sk_buff, headers_start));
845b1937227SEric Dumazet 	CHECK_SKB_FIELD(protocol);
846b1937227SEric Dumazet 	CHECK_SKB_FIELD(csum);
847b1937227SEric Dumazet 	CHECK_SKB_FIELD(hash);
848b1937227SEric Dumazet 	CHECK_SKB_FIELD(priority);
849b1937227SEric Dumazet 	CHECK_SKB_FIELD(skb_iif);
850b1937227SEric Dumazet 	CHECK_SKB_FIELD(vlan_proto);
851b1937227SEric Dumazet 	CHECK_SKB_FIELD(vlan_tci);
852b1937227SEric Dumazet 	CHECK_SKB_FIELD(transport_header);
853b1937227SEric Dumazet 	CHECK_SKB_FIELD(network_header);
854b1937227SEric Dumazet 	CHECK_SKB_FIELD(mac_header);
855b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_protocol);
856b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_transport_header);
857b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_network_header);
858b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_mac_header);
859b1937227SEric Dumazet 	CHECK_SKB_FIELD(mark);
860b1937227SEric Dumazet #ifdef CONFIG_NETWORK_SECMARK
861b1937227SEric Dumazet 	CHECK_SKB_FIELD(secmark);
862b1937227SEric Dumazet #endif
863e0d1095aSCong Wang #ifdef CONFIG_NET_RX_BUSY_POLL
864b1937227SEric Dumazet 	CHECK_SKB_FIELD(napi_id);
86506021292SEliezer Tamir #endif
8662bd82484SEric Dumazet #ifdef CONFIG_XPS
8672bd82484SEric Dumazet 	CHECK_SKB_FIELD(sender_cpu);
8682bd82484SEric Dumazet #endif
869b1937227SEric Dumazet #ifdef CONFIG_NET_SCHED
870b1937227SEric Dumazet 	CHECK_SKB_FIELD(tc_index);
871b1937227SEric Dumazet #endif
872b1937227SEric Dumazet 
873dec18810SHerbert Xu }
874dec18810SHerbert Xu 
87582c49a35SHerbert Xu /*
87682c49a35SHerbert Xu  * You should not add any new code to this function.  Add it to
87782c49a35SHerbert Xu  * __copy_skb_header above instead.
87882c49a35SHerbert Xu  */
879e0053ec0SHerbert Xu static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
8801da177e4SLinus Torvalds {
8811da177e4SLinus Torvalds #define C(x) n->x = skb->x
8821da177e4SLinus Torvalds 
8831da177e4SLinus Torvalds 	n->next = n->prev = NULL;
8841da177e4SLinus Torvalds 	n->sk = NULL;
885dec18810SHerbert Xu 	__copy_skb_header(n, skb);
886dec18810SHerbert Xu 
8871da177e4SLinus Torvalds 	C(len);
8881da177e4SLinus Torvalds 	C(data_len);
8893e6b3b2eSAlexey Dobriyan 	C(mac_len);
890334a8132SPatrick McHardy 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
89102f1c89dSPaul Moore 	n->cloned = 1;
8921da177e4SLinus Torvalds 	n->nohdr = 0;
893b13dda9fSEric Dumazet 	n->peeked = 0;
894e78bfb07SStefano Brivio 	C(pfmemalloc);
8951da177e4SLinus Torvalds 	n->destructor = NULL;
8961da177e4SLinus Torvalds 	C(tail);
8971da177e4SLinus Torvalds 	C(end);
89802f1c89dSPaul Moore 	C(head);
899d3836f21SEric Dumazet 	C(head_frag);
90002f1c89dSPaul Moore 	C(data);
90102f1c89dSPaul Moore 	C(truesize);
90263354797SReshetova, Elena 	refcount_set(&n->users, 1);
9031da177e4SLinus Torvalds 
9041da177e4SLinus Torvalds 	atomic_inc(&(skb_shinfo(skb)->dataref));
9051da177e4SLinus Torvalds 	skb->cloned = 1;
9061da177e4SLinus Torvalds 
9071da177e4SLinus Torvalds 	return n;
908e0053ec0SHerbert Xu #undef C
909e0053ec0SHerbert Xu }
910e0053ec0SHerbert Xu 
911e0053ec0SHerbert Xu /**
912e0053ec0SHerbert Xu  *	skb_morph	-	morph one skb into another
913e0053ec0SHerbert Xu  *	@dst: the skb to receive the contents
914e0053ec0SHerbert Xu  *	@src: the skb to supply the contents
915e0053ec0SHerbert Xu  *
916e0053ec0SHerbert Xu  *	This is identical to skb_clone except that the target skb is
917e0053ec0SHerbert Xu  *	supplied by the user.
918e0053ec0SHerbert Xu  *
919e0053ec0SHerbert Xu  *	The target skb is returned upon exit.
920e0053ec0SHerbert Xu  */
921e0053ec0SHerbert Xu struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
922e0053ec0SHerbert Xu {
9232d4baff8SHerbert Xu 	skb_release_all(dst);
924e0053ec0SHerbert Xu 	return __skb_clone(dst, src);
925e0053ec0SHerbert Xu }
926e0053ec0SHerbert Xu EXPORT_SYMBOL_GPL(skb_morph);
927e0053ec0SHerbert Xu 
9286f89dbceSSowmini Varadhan int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
929a91dbff5SWillem de Bruijn {
930a91dbff5SWillem de Bruijn 	unsigned long max_pg, num_pg, new_pg, old_pg;
931a91dbff5SWillem de Bruijn 	struct user_struct *user;
932a91dbff5SWillem de Bruijn 
933a91dbff5SWillem de Bruijn 	if (capable(CAP_IPC_LOCK) || !size)
934a91dbff5SWillem de Bruijn 		return 0;
935a91dbff5SWillem de Bruijn 
936a91dbff5SWillem de Bruijn 	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
937a91dbff5SWillem de Bruijn 	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
938a91dbff5SWillem de Bruijn 	user = mmp->user ? : current_user();
939a91dbff5SWillem de Bruijn 
940a91dbff5SWillem de Bruijn 	do {
941a91dbff5SWillem de Bruijn 		old_pg = atomic_long_read(&user->locked_vm);
942a91dbff5SWillem de Bruijn 		new_pg = old_pg + num_pg;
943a91dbff5SWillem de Bruijn 		if (new_pg > max_pg)
944a91dbff5SWillem de Bruijn 			return -ENOBUFS;
945a91dbff5SWillem de Bruijn 	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
946a91dbff5SWillem de Bruijn 		 old_pg);
947a91dbff5SWillem de Bruijn 
948a91dbff5SWillem de Bruijn 	if (!mmp->user) {
949a91dbff5SWillem de Bruijn 		mmp->user = get_uid(user);
950a91dbff5SWillem de Bruijn 		mmp->num_pg = num_pg;
951a91dbff5SWillem de Bruijn 	} else {
952a91dbff5SWillem de Bruijn 		mmp->num_pg += num_pg;
953a91dbff5SWillem de Bruijn 	}
954a91dbff5SWillem de Bruijn 
955a91dbff5SWillem de Bruijn 	return 0;
956a91dbff5SWillem de Bruijn }
9576f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
958a91dbff5SWillem de Bruijn 
9596f89dbceSSowmini Varadhan void mm_unaccount_pinned_pages(struct mmpin *mmp)
960a91dbff5SWillem de Bruijn {
961a91dbff5SWillem de Bruijn 	if (mmp->user) {
962a91dbff5SWillem de Bruijn 		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
963a91dbff5SWillem de Bruijn 		free_uid(mmp->user);
964a91dbff5SWillem de Bruijn 	}
965a91dbff5SWillem de Bruijn }
9666f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
967a91dbff5SWillem de Bruijn 
96852267790SWillem de Bruijn struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
96952267790SWillem de Bruijn {
97052267790SWillem de Bruijn 	struct ubuf_info *uarg;
97152267790SWillem de Bruijn 	struct sk_buff *skb;
97252267790SWillem de Bruijn 
97352267790SWillem de Bruijn 	WARN_ON_ONCE(!in_task());
97452267790SWillem de Bruijn 
97552267790SWillem de Bruijn 	skb = sock_omalloc(sk, 0, GFP_KERNEL);
97652267790SWillem de Bruijn 	if (!skb)
97752267790SWillem de Bruijn 		return NULL;
97852267790SWillem de Bruijn 
97952267790SWillem de Bruijn 	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
98052267790SWillem de Bruijn 	uarg = (void *)skb->cb;
981a91dbff5SWillem de Bruijn 	uarg->mmp.user = NULL;
982a91dbff5SWillem de Bruijn 
983a91dbff5SWillem de Bruijn 	if (mm_account_pinned_pages(&uarg->mmp, size)) {
984a91dbff5SWillem de Bruijn 		kfree_skb(skb);
985a91dbff5SWillem de Bruijn 		return NULL;
986a91dbff5SWillem de Bruijn 	}
98752267790SWillem de Bruijn 
98852267790SWillem de Bruijn 	uarg->callback = sock_zerocopy_callback;
9894ab6c99dSWillem de Bruijn 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
9904ab6c99dSWillem de Bruijn 	uarg->len = 1;
9914ab6c99dSWillem de Bruijn 	uarg->bytelen = size;
99252267790SWillem de Bruijn 	uarg->zerocopy = 1;
993c1d1b437SEric Dumazet 	refcount_set(&uarg->refcnt, 1);
99452267790SWillem de Bruijn 	sock_hold(sk);
99552267790SWillem de Bruijn 
99652267790SWillem de Bruijn 	return uarg;
99752267790SWillem de Bruijn }
99852267790SWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
99952267790SWillem de Bruijn 
100052267790SWillem de Bruijn static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
100152267790SWillem de Bruijn {
100252267790SWillem de Bruijn 	return container_of((void *)uarg, struct sk_buff, cb);
100352267790SWillem de Bruijn }
100452267790SWillem de Bruijn 
10054ab6c99dSWillem de Bruijn struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
10064ab6c99dSWillem de Bruijn 					struct ubuf_info *uarg)
10074ab6c99dSWillem de Bruijn {
10084ab6c99dSWillem de Bruijn 	if (uarg) {
10094ab6c99dSWillem de Bruijn 		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
10104ab6c99dSWillem de Bruijn 		u32 bytelen, next;
10114ab6c99dSWillem de Bruijn 
10124ab6c99dSWillem de Bruijn 		/* realloc only when socket is locked (TCP, UDP cork),
10134ab6c99dSWillem de Bruijn 		 * so uarg->len and sk_zckey access is serialized
10144ab6c99dSWillem de Bruijn 		 */
10154ab6c99dSWillem de Bruijn 		if (!sock_owned_by_user(sk)) {
10164ab6c99dSWillem de Bruijn 			WARN_ON_ONCE(1);
10174ab6c99dSWillem de Bruijn 			return NULL;
10184ab6c99dSWillem de Bruijn 		}
10194ab6c99dSWillem de Bruijn 
10204ab6c99dSWillem de Bruijn 		bytelen = uarg->bytelen + size;
10214ab6c99dSWillem de Bruijn 		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
10224ab6c99dSWillem de Bruijn 			/* TCP can create new skb to attach new uarg */
10234ab6c99dSWillem de Bruijn 			if (sk->sk_type == SOCK_STREAM)
10244ab6c99dSWillem de Bruijn 				goto new_alloc;
10254ab6c99dSWillem de Bruijn 			return NULL;
10264ab6c99dSWillem de Bruijn 		}
10274ab6c99dSWillem de Bruijn 
10284ab6c99dSWillem de Bruijn 		next = (u32)atomic_read(&sk->sk_zckey);
10294ab6c99dSWillem de Bruijn 		if ((u32)(uarg->id + uarg->len) == next) {
1030a91dbff5SWillem de Bruijn 			if (mm_account_pinned_pages(&uarg->mmp, size))
1031a91dbff5SWillem de Bruijn 				return NULL;
10324ab6c99dSWillem de Bruijn 			uarg->len++;
10334ab6c99dSWillem de Bruijn 			uarg->bytelen = bytelen;
10344ab6c99dSWillem de Bruijn 			atomic_set(&sk->sk_zckey, ++next);
1035db5bce32SEric Dumazet 			sock_zerocopy_get(uarg);
10364ab6c99dSWillem de Bruijn 			return uarg;
10374ab6c99dSWillem de Bruijn 		}
10384ab6c99dSWillem de Bruijn 	}
10394ab6c99dSWillem de Bruijn 
10404ab6c99dSWillem de Bruijn new_alloc:
10414ab6c99dSWillem de Bruijn 	return sock_zerocopy_alloc(sk, size);
10424ab6c99dSWillem de Bruijn }
10434ab6c99dSWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
10444ab6c99dSWillem de Bruijn 
10454ab6c99dSWillem de Bruijn static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
10464ab6c99dSWillem de Bruijn {
10474ab6c99dSWillem de Bruijn 	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
10484ab6c99dSWillem de Bruijn 	u32 old_lo, old_hi;
10494ab6c99dSWillem de Bruijn 	u64 sum_len;
10504ab6c99dSWillem de Bruijn 
10514ab6c99dSWillem de Bruijn 	old_lo = serr->ee.ee_info;
10524ab6c99dSWillem de Bruijn 	old_hi = serr->ee.ee_data;
10534ab6c99dSWillem de Bruijn 	sum_len = old_hi - old_lo + 1ULL + len;
10544ab6c99dSWillem de Bruijn 
10554ab6c99dSWillem de Bruijn 	if (sum_len >= (1ULL << 32))
10564ab6c99dSWillem de Bruijn 		return false;
10574ab6c99dSWillem de Bruijn 
10584ab6c99dSWillem de Bruijn 	if (lo != old_hi + 1)
10594ab6c99dSWillem de Bruijn 		return false;
10604ab6c99dSWillem de Bruijn 
10614ab6c99dSWillem de Bruijn 	serr->ee.ee_data += len;
10624ab6c99dSWillem de Bruijn 	return true;
10634ab6c99dSWillem de Bruijn }
10644ab6c99dSWillem de Bruijn 
106552267790SWillem de Bruijn void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
106652267790SWillem de Bruijn {
10674ab6c99dSWillem de Bruijn 	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
106852267790SWillem de Bruijn 	struct sock_exterr_skb *serr;
106952267790SWillem de Bruijn 	struct sock *sk = skb->sk;
10704ab6c99dSWillem de Bruijn 	struct sk_buff_head *q;
10714ab6c99dSWillem de Bruijn 	unsigned long flags;
10724ab6c99dSWillem de Bruijn 	u32 lo, hi;
10734ab6c99dSWillem de Bruijn 	u16 len;
107452267790SWillem de Bruijn 
1075ccaffff1SWillem de Bruijn 	mm_unaccount_pinned_pages(&uarg->mmp);
1076ccaffff1SWillem de Bruijn 
10774ab6c99dSWillem de Bruijn 	/* if !len, there was only 1 call, and it was aborted
10784ab6c99dSWillem de Bruijn 	 * so do not queue a completion notification
10794ab6c99dSWillem de Bruijn 	 */
10804ab6c99dSWillem de Bruijn 	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
108152267790SWillem de Bruijn 		goto release;
108252267790SWillem de Bruijn 
10834ab6c99dSWillem de Bruijn 	len = uarg->len;
10844ab6c99dSWillem de Bruijn 	lo = uarg->id;
10854ab6c99dSWillem de Bruijn 	hi = uarg->id + len - 1;
10864ab6c99dSWillem de Bruijn 
108752267790SWillem de Bruijn 	serr = SKB_EXT_ERR(skb);
108852267790SWillem de Bruijn 	memset(serr, 0, sizeof(*serr));
108952267790SWillem de Bruijn 	serr->ee.ee_errno = 0;
109052267790SWillem de Bruijn 	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
10914ab6c99dSWillem de Bruijn 	serr->ee.ee_data = hi;
10924ab6c99dSWillem de Bruijn 	serr->ee.ee_info = lo;
109352267790SWillem de Bruijn 	if (!success)
109452267790SWillem de Bruijn 		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
109552267790SWillem de Bruijn 
10964ab6c99dSWillem de Bruijn 	q = &sk->sk_error_queue;
10974ab6c99dSWillem de Bruijn 	spin_lock_irqsave(&q->lock, flags);
10984ab6c99dSWillem de Bruijn 	tail = skb_peek_tail(q);
10994ab6c99dSWillem de Bruijn 	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
11004ab6c99dSWillem de Bruijn 	    !skb_zerocopy_notify_extend(tail, lo, len)) {
11014ab6c99dSWillem de Bruijn 		__skb_queue_tail(q, skb);
110252267790SWillem de Bruijn 		skb = NULL;
11034ab6c99dSWillem de Bruijn 	}
11044ab6c99dSWillem de Bruijn 	spin_unlock_irqrestore(&q->lock, flags);
110552267790SWillem de Bruijn 
110652267790SWillem de Bruijn 	sk->sk_error_report(sk);
110752267790SWillem de Bruijn 
110852267790SWillem de Bruijn release:
110952267790SWillem de Bruijn 	consume_skb(skb);
111052267790SWillem de Bruijn 	sock_put(sk);
111152267790SWillem de Bruijn }
111252267790SWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
111352267790SWillem de Bruijn 
111452267790SWillem de Bruijn void sock_zerocopy_put(struct ubuf_info *uarg)
111552267790SWillem de Bruijn {
1116c1d1b437SEric Dumazet 	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
111752267790SWillem de Bruijn 		if (uarg->callback)
111852267790SWillem de Bruijn 			uarg->callback(uarg, uarg->zerocopy);
111952267790SWillem de Bruijn 		else
112052267790SWillem de Bruijn 			consume_skb(skb_from_uarg(uarg));
112152267790SWillem de Bruijn 	}
112252267790SWillem de Bruijn }
112352267790SWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_put);
112452267790SWillem de Bruijn 
112552900d22SWillem de Bruijn void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
112652267790SWillem de Bruijn {
112752267790SWillem de Bruijn 	if (uarg) {
112852267790SWillem de Bruijn 		struct sock *sk = skb_from_uarg(uarg)->sk;
112952267790SWillem de Bruijn 
113052267790SWillem de Bruijn 		atomic_dec(&sk->sk_zckey);
11314ab6c99dSWillem de Bruijn 		uarg->len--;
113252267790SWillem de Bruijn 
113352900d22SWillem de Bruijn 		if (have_uref)
113452267790SWillem de Bruijn 			sock_zerocopy_put(uarg);
113552267790SWillem de Bruijn 	}
113652267790SWillem de Bruijn }
113752267790SWillem de Bruijn EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
113852267790SWillem de Bruijn 
1139b5947e5dSWillem de Bruijn int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
1140b5947e5dSWillem de Bruijn {
1141b5947e5dSWillem de Bruijn 	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1142b5947e5dSWillem de Bruijn }
1143b5947e5dSWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
1144b5947e5dSWillem de Bruijn 
114552267790SWillem de Bruijn int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
114652267790SWillem de Bruijn 			     struct msghdr *msg, int len,
114752267790SWillem de Bruijn 			     struct ubuf_info *uarg)
114852267790SWillem de Bruijn {
11494ab6c99dSWillem de Bruijn 	struct ubuf_info *orig_uarg = skb_zcopy(skb);
115052267790SWillem de Bruijn 	struct iov_iter orig_iter = msg->msg_iter;
115152267790SWillem de Bruijn 	int err, orig_len = skb->len;
115252267790SWillem de Bruijn 
11534ab6c99dSWillem de Bruijn 	/* An skb can only point to one uarg. This edge case happens when
11544ab6c99dSWillem de Bruijn 	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
11554ab6c99dSWillem de Bruijn 	 */
11564ab6c99dSWillem de Bruijn 	if (orig_uarg && uarg != orig_uarg)
11574ab6c99dSWillem de Bruijn 		return -EEXIST;
11584ab6c99dSWillem de Bruijn 
115952267790SWillem de Bruijn 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
116052267790SWillem de Bruijn 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
116154d43117SWillem de Bruijn 		struct sock *save_sk = skb->sk;
116254d43117SWillem de Bruijn 
116352267790SWillem de Bruijn 		/* Streams do not free skb on error. Reset to prev state. */
116452267790SWillem de Bruijn 		msg->msg_iter = orig_iter;
116554d43117SWillem de Bruijn 		skb->sk = sk;
116652267790SWillem de Bruijn 		___pskb_trim(skb, orig_len);
116754d43117SWillem de Bruijn 		skb->sk = save_sk;
116852267790SWillem de Bruijn 		return err;
116952267790SWillem de Bruijn 	}
117052267790SWillem de Bruijn 
117152900d22SWillem de Bruijn 	skb_zcopy_set(skb, uarg, NULL);
117252267790SWillem de Bruijn 	return skb->len - orig_len;
117352267790SWillem de Bruijn }
117452267790SWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
117552267790SWillem de Bruijn 
11761f8b977aSWillem de Bruijn static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
117752267790SWillem de Bruijn 			      gfp_t gfp_mask)
117852267790SWillem de Bruijn {
117952267790SWillem de Bruijn 	if (skb_zcopy(orig)) {
118052267790SWillem de Bruijn 		if (skb_zcopy(nskb)) {
118152267790SWillem de Bruijn 			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
118252267790SWillem de Bruijn 			if (!gfp_mask) {
118352267790SWillem de Bruijn 				WARN_ON_ONCE(1);
118452267790SWillem de Bruijn 				return -ENOMEM;
118552267790SWillem de Bruijn 			}
118652267790SWillem de Bruijn 			if (skb_uarg(nskb) == skb_uarg(orig))
118752267790SWillem de Bruijn 				return 0;
118852267790SWillem de Bruijn 			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
118952267790SWillem de Bruijn 				return -EIO;
119052267790SWillem de Bruijn 		}
119152900d22SWillem de Bruijn 		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
119252267790SWillem de Bruijn 	}
119352267790SWillem de Bruijn 	return 0;
119452267790SWillem de Bruijn }
119552267790SWillem de Bruijn 
11962c53040fSBen Hutchings /**
11972c53040fSBen Hutchings  *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
119848c83012SMichael S. Tsirkin  *	@skb: the skb to modify
119948c83012SMichael S. Tsirkin  *	@gfp_mask: allocation priority
120048c83012SMichael S. Tsirkin  *
120148c83012SMichael S. Tsirkin  *	This must be called on SKBTX_DEV_ZEROCOPY skb.
120248c83012SMichael S. Tsirkin  *	It will copy all frags into kernel and drop the reference
120348c83012SMichael S. Tsirkin  *	to userspace pages.
120448c83012SMichael S. Tsirkin  *
120548c83012SMichael S. Tsirkin  *	If this function is called from an interrupt gfp_mask() must be
120648c83012SMichael S. Tsirkin  *	%GFP_ATOMIC.
120748c83012SMichael S. Tsirkin  *
120848c83012SMichael S. Tsirkin  *	Returns 0 on success or a negative error code on failure
120948c83012SMichael S. Tsirkin  *	to allocate kernel memory to copy to.
121048c83012SMichael S. Tsirkin  */
121148c83012SMichael S. Tsirkin int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1212a6686f2fSShirley Ma {
1213a6686f2fSShirley Ma 	int num_frags = skb_shinfo(skb)->nr_frags;
1214a6686f2fSShirley Ma 	struct page *page, *head = NULL;
12153ece7826SWillem de Bruijn 	int i, new_frags;
12163ece7826SWillem de Bruijn 	u32 d_off;
1217a6686f2fSShirley Ma 
12183ece7826SWillem de Bruijn 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
12193ece7826SWillem de Bruijn 		return -EINVAL;
12203ece7826SWillem de Bruijn 
1221f72c4ac6SWillem de Bruijn 	if (!num_frags)
1222f72c4ac6SWillem de Bruijn 		goto release;
1223f72c4ac6SWillem de Bruijn 
12243ece7826SWillem de Bruijn 	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
12253ece7826SWillem de Bruijn 	for (i = 0; i < new_frags; i++) {
122602756ed4SKrishna Kumar 		page = alloc_page(gfp_mask);
1227a6686f2fSShirley Ma 		if (!page) {
1228a6686f2fSShirley Ma 			while (head) {
122940dadff2SSunghan Suh 				struct page *next = (struct page *)page_private(head);
1230a6686f2fSShirley Ma 				put_page(head);
1231a6686f2fSShirley Ma 				head = next;
1232a6686f2fSShirley Ma 			}
1233a6686f2fSShirley Ma 			return -ENOMEM;
1234a6686f2fSShirley Ma 		}
12353ece7826SWillem de Bruijn 		set_page_private(page, (unsigned long)head);
12363ece7826SWillem de Bruijn 		head = page;
12373ece7826SWillem de Bruijn 	}
12383ece7826SWillem de Bruijn 
12393ece7826SWillem de Bruijn 	page = head;
12403ece7826SWillem de Bruijn 	d_off = 0;
12413ece7826SWillem de Bruijn 	for (i = 0; i < num_frags; i++) {
12423ece7826SWillem de Bruijn 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
12433ece7826SWillem de Bruijn 		u32 p_off, p_len, copied;
12443ece7826SWillem de Bruijn 		struct page *p;
12453ece7826SWillem de Bruijn 		u8 *vaddr;
1246c613c209SWillem de Bruijn 
1247c613c209SWillem de Bruijn 		skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
1248c613c209SWillem de Bruijn 				      p, p_off, p_len, copied) {
12493ece7826SWillem de Bruijn 			u32 copy, done = 0;
1250c613c209SWillem de Bruijn 			vaddr = kmap_atomic(p);
12513ece7826SWillem de Bruijn 
12523ece7826SWillem de Bruijn 			while (done < p_len) {
12533ece7826SWillem de Bruijn 				if (d_off == PAGE_SIZE) {
12543ece7826SWillem de Bruijn 					d_off = 0;
12553ece7826SWillem de Bruijn 					page = (struct page *)page_private(page);
12563ece7826SWillem de Bruijn 				}
12573ece7826SWillem de Bruijn 				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
12583ece7826SWillem de Bruijn 				memcpy(page_address(page) + d_off,
12593ece7826SWillem de Bruijn 				       vaddr + p_off + done, copy);
12603ece7826SWillem de Bruijn 				done += copy;
12613ece7826SWillem de Bruijn 				d_off += copy;
12623ece7826SWillem de Bruijn 			}
126351c56b00SEric Dumazet 			kunmap_atomic(vaddr);
1264c613c209SWillem de Bruijn 		}
1265a6686f2fSShirley Ma 	}
1266a6686f2fSShirley Ma 
1267a6686f2fSShirley Ma 	/* skb frags release userspace buffers */
126802756ed4SKrishna Kumar 	for (i = 0; i < num_frags; i++)
1269a8605c60SIan Campbell 		skb_frag_unref(skb, i);
1270a6686f2fSShirley Ma 
1271a6686f2fSShirley Ma 	/* skb frags point to kernel buffers */
12723ece7826SWillem de Bruijn 	for (i = 0; i < new_frags - 1; i++) {
12733ece7826SWillem de Bruijn 		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
127440dadff2SSunghan Suh 		head = (struct page *)page_private(head);
1275a6686f2fSShirley Ma 	}
12763ece7826SWillem de Bruijn 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
12773ece7826SWillem de Bruijn 	skb_shinfo(skb)->nr_frags = new_frags;
127848c83012SMichael S. Tsirkin 
1279b90ddd56SWillem de Bruijn release:
12801f8b977aSWillem de Bruijn 	skb_zcopy_clear(skb, false);
1281a6686f2fSShirley Ma 	return 0;
1282a6686f2fSShirley Ma }
1283dcc0fb78SMichael S. Tsirkin EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1284a6686f2fSShirley Ma 
1285e0053ec0SHerbert Xu /**
1286e0053ec0SHerbert Xu  *	skb_clone	-	duplicate an sk_buff
1287e0053ec0SHerbert Xu  *	@skb: buffer to clone
1288e0053ec0SHerbert Xu  *	@gfp_mask: allocation priority
1289e0053ec0SHerbert Xu  *
1290e0053ec0SHerbert Xu  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
1291e0053ec0SHerbert Xu  *	copies share the same packet data but not structure. The new
1292e0053ec0SHerbert Xu  *	buffer has a reference count of 1. If the allocation fails the
1293e0053ec0SHerbert Xu  *	function returns %NULL otherwise the new buffer is returned.
1294e0053ec0SHerbert Xu  *
1295e0053ec0SHerbert Xu  *	If this function is called from an interrupt gfp_mask() must be
1296e0053ec0SHerbert Xu  *	%GFP_ATOMIC.
1297e0053ec0SHerbert Xu  */
1298e0053ec0SHerbert Xu 
1299e0053ec0SHerbert Xu struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1300e0053ec0SHerbert Xu {
1301d0bf4a9eSEric Dumazet 	struct sk_buff_fclones *fclones = container_of(skb,
1302d0bf4a9eSEric Dumazet 						       struct sk_buff_fclones,
1303d0bf4a9eSEric Dumazet 						       skb1);
13046ffe75ebSEric Dumazet 	struct sk_buff *n;
1305e0053ec0SHerbert Xu 
130670008aa5SMichael S. Tsirkin 	if (skb_orphan_frags(skb, gfp_mask))
1307a6686f2fSShirley Ma 		return NULL;
1308a6686f2fSShirley Ma 
1309e0053ec0SHerbert Xu 	if (skb->fclone == SKB_FCLONE_ORIG &&
13102638595aSReshetova, Elena 	    refcount_read(&fclones->fclone_ref) == 1) {
13116ffe75ebSEric Dumazet 		n = &fclones->skb2;
13122638595aSReshetova, Elena 		refcount_set(&fclones->fclone_ref, 2);
1313e0053ec0SHerbert Xu 	} else {
1314c93bdd0eSMel Gorman 		if (skb_pfmemalloc(skb))
1315c93bdd0eSMel Gorman 			gfp_mask |= __GFP_MEMALLOC;
1316c93bdd0eSMel Gorman 
1317e0053ec0SHerbert Xu 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1318e0053ec0SHerbert Xu 		if (!n)
1319e0053ec0SHerbert Xu 			return NULL;
1320fe55f6d5SVegard Nossum 
1321e0053ec0SHerbert Xu 		n->fclone = SKB_FCLONE_UNAVAILABLE;
1322e0053ec0SHerbert Xu 	}
1323e0053ec0SHerbert Xu 
1324e0053ec0SHerbert Xu 	return __skb_clone(n, skb);
13251da177e4SLinus Torvalds }
1326b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_clone);
13271da177e4SLinus Torvalds 
1328b0768a86SToshiaki Makita void skb_headers_offset_update(struct sk_buff *skb, int off)
1329f5b17294SPravin B Shelar {
1330030737bcSEric Dumazet 	/* Only adjust this if it actually is csum_start rather than csum */
1331030737bcSEric Dumazet 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1332030737bcSEric Dumazet 		skb->csum_start += off;
1333f5b17294SPravin B Shelar 	/* {transport,network,mac}_header and tail are relative to skb->head */
1334f5b17294SPravin B Shelar 	skb->transport_header += off;
1335f5b17294SPravin B Shelar 	skb->network_header   += off;
1336f5b17294SPravin B Shelar 	if (skb_mac_header_was_set(skb))
1337f5b17294SPravin B Shelar 		skb->mac_header += off;
1338f5b17294SPravin B Shelar 	skb->inner_transport_header += off;
1339f5b17294SPravin B Shelar 	skb->inner_network_header += off;
1340aefbd2b3SPravin B Shelar 	skb->inner_mac_header += off;
1341f5b17294SPravin B Shelar }
1342b0768a86SToshiaki Makita EXPORT_SYMBOL(skb_headers_offset_update);
1343f5b17294SPravin B Shelar 
134408303c18SIlya Lesokhin void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
13451da177e4SLinus Torvalds {
1346dec18810SHerbert Xu 	__copy_skb_header(new, old);
1347dec18810SHerbert Xu 
13487967168cSHerbert Xu 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
13497967168cSHerbert Xu 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
13507967168cSHerbert Xu 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
13511da177e4SLinus Torvalds }
135208303c18SIlya Lesokhin EXPORT_SYMBOL(skb_copy_header);
13531da177e4SLinus Torvalds 
1354c93bdd0eSMel Gorman static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1355c93bdd0eSMel Gorman {
1356c93bdd0eSMel Gorman 	if (skb_pfmemalloc(skb))
1357c93bdd0eSMel Gorman 		return SKB_ALLOC_RX;
1358c93bdd0eSMel Gorman 	return 0;
1359c93bdd0eSMel Gorman }
1360c93bdd0eSMel Gorman 
13611da177e4SLinus Torvalds /**
13621da177e4SLinus Torvalds  *	skb_copy	-	create private copy of an sk_buff
13631da177e4SLinus Torvalds  *	@skb: buffer to copy
13641da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
13651da177e4SLinus Torvalds  *
13661da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and its data. This is used when the
13671da177e4SLinus Torvalds  *	caller wishes to modify the data and needs a private copy of the
13681da177e4SLinus Torvalds  *	data to alter. Returns %NULL on failure or the pointer to the buffer
13691da177e4SLinus Torvalds  *	on success. The returned buffer has a reference count of 1.
13701da177e4SLinus Torvalds  *
13711da177e4SLinus Torvalds  *	As by-product this function converts non-linear &sk_buff to linear
13721da177e4SLinus Torvalds  *	one, so that &sk_buff becomes completely private and caller is allowed
13731da177e4SLinus Torvalds  *	to modify all the data of returned buffer. This means that this
13741da177e4SLinus Torvalds  *	function is not recommended for use in circumstances when only
13751da177e4SLinus Torvalds  *	header is going to be modified. Use pskb_copy() instead.
13761da177e4SLinus Torvalds  */
13771da177e4SLinus Torvalds 
1378dd0fc66fSAl Viro struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
13791da177e4SLinus Torvalds {
13806602cebbSEric Dumazet 	int headerlen = skb_headroom(skb);
1381ec47ea82SAlexander Duyck 	unsigned int size = skb_end_offset(skb) + skb->data_len;
1382c93bdd0eSMel Gorman 	struct sk_buff *n = __alloc_skb(size, gfp_mask,
1383c93bdd0eSMel Gorman 					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
13846602cebbSEric Dumazet 
13851da177e4SLinus Torvalds 	if (!n)
13861da177e4SLinus Torvalds 		return NULL;
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds 	/* Set the data pointer */
13891da177e4SLinus Torvalds 	skb_reserve(n, headerlen);
13901da177e4SLinus Torvalds 	/* Set the tail pointer and length */
13911da177e4SLinus Torvalds 	skb_put(n, skb->len);
13921da177e4SLinus Torvalds 
13939f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
13941da177e4SLinus Torvalds 
139508303c18SIlya Lesokhin 	skb_copy_header(n, skb);
13961da177e4SLinus Torvalds 	return n;
13971da177e4SLinus Torvalds }
1398b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy);
13991da177e4SLinus Torvalds 
14001da177e4SLinus Torvalds /**
1401bad93e9dSOctavian Purdila  *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
14021da177e4SLinus Torvalds  *	@skb: buffer to copy
1403117632e6SEric Dumazet  *	@headroom: headroom of new skb
14041da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
1405bad93e9dSOctavian Purdila  *	@fclone: if true allocate the copy of the skb from the fclone
1406bad93e9dSOctavian Purdila  *	cache instead of the head cache; it is recommended to set this
1407bad93e9dSOctavian Purdila  *	to true for the cases where the copy will likely be cloned
14081da177e4SLinus Torvalds  *
14091da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and part of its data, located
14101da177e4SLinus Torvalds  *	in header. Fragmented data remain shared. This is used when
14111da177e4SLinus Torvalds  *	the caller wishes to modify only header of &sk_buff and needs
14121da177e4SLinus Torvalds  *	private copy of the header to alter. Returns %NULL on failure
14131da177e4SLinus Torvalds  *	or the pointer to the buffer on success.
14141da177e4SLinus Torvalds  *	The returned buffer has a reference count of 1.
14151da177e4SLinus Torvalds  */
14161da177e4SLinus Torvalds 
1417bad93e9dSOctavian Purdila struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1418bad93e9dSOctavian Purdila 				   gfp_t gfp_mask, bool fclone)
14191da177e4SLinus Torvalds {
1420117632e6SEric Dumazet 	unsigned int size = skb_headlen(skb) + headroom;
1421bad93e9dSOctavian Purdila 	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1422bad93e9dSOctavian Purdila 	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
14236602cebbSEric Dumazet 
14241da177e4SLinus Torvalds 	if (!n)
14251da177e4SLinus Torvalds 		goto out;
14261da177e4SLinus Torvalds 
14271da177e4SLinus Torvalds 	/* Set the data pointer */
1428117632e6SEric Dumazet 	skb_reserve(n, headroom);
14291da177e4SLinus Torvalds 	/* Set the tail pointer and length */
14301da177e4SLinus Torvalds 	skb_put(n, skb_headlen(skb));
14311da177e4SLinus Torvalds 	/* Copy the bytes */
1432d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data(skb, n->data, n->len);
14331da177e4SLinus Torvalds 
143425f484a6SHerbert Xu 	n->truesize += skb->data_len;
14351da177e4SLinus Torvalds 	n->data_len  = skb->data_len;
14361da177e4SLinus Torvalds 	n->len	     = skb->len;
14371da177e4SLinus Torvalds 
14381da177e4SLinus Torvalds 	if (skb_shinfo(skb)->nr_frags) {
14391da177e4SLinus Torvalds 		int i;
14401da177e4SLinus Torvalds 
14411f8b977aSWillem de Bruijn 		if (skb_orphan_frags(skb, gfp_mask) ||
14421f8b977aSWillem de Bruijn 		    skb_zerocopy_clone(n, skb, gfp_mask)) {
14431511022cSDan Carpenter 			kfree_skb(n);
14441511022cSDan Carpenter 			n = NULL;
1445a6686f2fSShirley Ma 			goto out;
1446a6686f2fSShirley Ma 		}
14471da177e4SLinus Torvalds 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
14481da177e4SLinus Torvalds 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1449ea2ab693SIan Campbell 			skb_frag_ref(skb, i);
14501da177e4SLinus Torvalds 		}
14511da177e4SLinus Torvalds 		skb_shinfo(n)->nr_frags = i;
14521da177e4SLinus Torvalds 	}
14531da177e4SLinus Torvalds 
145421dc3301SDavid S. Miller 	if (skb_has_frag_list(skb)) {
14551da177e4SLinus Torvalds 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
14561da177e4SLinus Torvalds 		skb_clone_fraglist(n);
14571da177e4SLinus Torvalds 	}
14581da177e4SLinus Torvalds 
145908303c18SIlya Lesokhin 	skb_copy_header(n, skb);
14601da177e4SLinus Torvalds out:
14611da177e4SLinus Torvalds 	return n;
14621da177e4SLinus Torvalds }
1463bad93e9dSOctavian Purdila EXPORT_SYMBOL(__pskb_copy_fclone);
14641da177e4SLinus Torvalds 
14651da177e4SLinus Torvalds /**
14661da177e4SLinus Torvalds  *	pskb_expand_head - reallocate header of &sk_buff
14671da177e4SLinus Torvalds  *	@skb: buffer to reallocate
14681da177e4SLinus Torvalds  *	@nhead: room to add at head
14691da177e4SLinus Torvalds  *	@ntail: room to add at tail
14701da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
14711da177e4SLinus Torvalds  *
1472bc32383cSMathias Krause  *	Expands (or creates identical copy, if @nhead and @ntail are zero)
1473bc32383cSMathias Krause  *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
14741da177e4SLinus Torvalds  *	reference count of 1. Returns zero in the case of success or error,
14751da177e4SLinus Torvalds  *	if expansion failed. In the last case, &sk_buff is not changed.
14761da177e4SLinus Torvalds  *
14771da177e4SLinus Torvalds  *	All the pointers pointing into skb header may change and must be
14781da177e4SLinus Torvalds  *	reloaded after call to this function.
14791da177e4SLinus Torvalds  */
14801da177e4SLinus Torvalds 
148186a76cafSVictor Fusco int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1482dd0fc66fSAl Viro 		     gfp_t gfp_mask)
14831da177e4SLinus Torvalds {
1484158f323bSEric Dumazet 	int i, osize = skb_end_offset(skb);
1485158f323bSEric Dumazet 	int size = osize + nhead + ntail;
14861da177e4SLinus Torvalds 	long off;
1487158f323bSEric Dumazet 	u8 *data;
14881da177e4SLinus Torvalds 
14894edd87adSHerbert Xu 	BUG_ON(nhead < 0);
14904edd87adSHerbert Xu 
14919f77fad3STim Hansen 	BUG_ON(skb_shared(skb));
14921da177e4SLinus Torvalds 
14931da177e4SLinus Torvalds 	size = SKB_DATA_ALIGN(size);
14941da177e4SLinus Torvalds 
1495c93bdd0eSMel Gorman 	if (skb_pfmemalloc(skb))
1496c93bdd0eSMel Gorman 		gfp_mask |= __GFP_MEMALLOC;
1497c93bdd0eSMel Gorman 	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1498c93bdd0eSMel Gorman 			       gfp_mask, NUMA_NO_NODE, NULL);
14991da177e4SLinus Torvalds 	if (!data)
15001da177e4SLinus Torvalds 		goto nodata;
150187151b86SEric Dumazet 	size = SKB_WITH_OVERHEAD(ksize(data));
15021da177e4SLinus Torvalds 
15031da177e4SLinus Torvalds 	/* Copy only real data... and, alas, header. This should be
15046602cebbSEric Dumazet 	 * optimized for the cases when header is void.
15056602cebbSEric Dumazet 	 */
15066602cebbSEric Dumazet 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
15076602cebbSEric Dumazet 
15086602cebbSEric Dumazet 	memcpy((struct skb_shared_info *)(data + size),
15096602cebbSEric Dumazet 	       skb_shinfo(skb),
1510fed66381SEric Dumazet 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
15111da177e4SLinus Torvalds 
15123e24591aSAlexander Duyck 	/*
15133e24591aSAlexander Duyck 	 * if shinfo is shared we must drop the old head gracefully, but if it
15143e24591aSAlexander Duyck 	 * is not we can just drop the old head and let the existing refcount
15153e24591aSAlexander Duyck 	 * be since all we did is relocate the values
15163e24591aSAlexander Duyck 	 */
15173e24591aSAlexander Duyck 	if (skb_cloned(skb)) {
151870008aa5SMichael S. Tsirkin 		if (skb_orphan_frags(skb, gfp_mask))
1519a6686f2fSShirley Ma 			goto nofrags;
15201f8b977aSWillem de Bruijn 		if (skb_zcopy(skb))
1521c1d1b437SEric Dumazet 			refcount_inc(&skb_uarg(skb)->refcnt);
15221da177e4SLinus Torvalds 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1523ea2ab693SIan Campbell 			skb_frag_ref(skb, i);
15241da177e4SLinus Torvalds 
152521dc3301SDavid S. Miller 		if (skb_has_frag_list(skb))
15261da177e4SLinus Torvalds 			skb_clone_fraglist(skb);
15271da177e4SLinus Torvalds 
15281da177e4SLinus Torvalds 		skb_release_data(skb);
15293e24591aSAlexander Duyck 	} else {
15303e24591aSAlexander Duyck 		skb_free_head(skb);
15311fd63041SEric Dumazet 	}
15321da177e4SLinus Torvalds 	off = (data + nhead) - skb->head;
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds 	skb->head     = data;
1535d3836f21SEric Dumazet 	skb->head_frag = 0;
15361da177e4SLinus Torvalds 	skb->data    += off;
15374305b541SArnaldo Carvalho de Melo #ifdef NET_SKBUFF_DATA_USES_OFFSET
15384305b541SArnaldo Carvalho de Melo 	skb->end      = size;
153956eb8882SPatrick McHardy 	off           = nhead;
15404305b541SArnaldo Carvalho de Melo #else
15414305b541SArnaldo Carvalho de Melo 	skb->end      = skb->head + size;
154256eb8882SPatrick McHardy #endif
154327a884dcSArnaldo Carvalho de Melo 	skb->tail	      += off;
1544b41abb42SPeter Pan(潘卫平) 	skb_headers_offset_update(skb, nhead);
15451da177e4SLinus Torvalds 	skb->cloned   = 0;
1546334a8132SPatrick McHardy 	skb->hdr_len  = 0;
15471da177e4SLinus Torvalds 	skb->nohdr    = 0;
15481da177e4SLinus Torvalds 	atomic_set(&skb_shinfo(skb)->dataref, 1);
1549158f323bSEric Dumazet 
1550de8f3a83SDaniel Borkmann 	skb_metadata_clear(skb);
1551de8f3a83SDaniel Borkmann 
1552158f323bSEric Dumazet 	/* It is not generally safe to change skb->truesize.
1553158f323bSEric Dumazet 	 * For the moment, we really care of rx path, or
1554158f323bSEric Dumazet 	 * when skb is orphaned (not attached to a socket).
1555158f323bSEric Dumazet 	 */
1556158f323bSEric Dumazet 	if (!skb->sk || skb->destructor == sock_edemux)
1557158f323bSEric Dumazet 		skb->truesize += size - osize;
1558158f323bSEric Dumazet 
15591da177e4SLinus Torvalds 	return 0;
15601da177e4SLinus Torvalds 
1561a6686f2fSShirley Ma nofrags:
1562a6686f2fSShirley Ma 	kfree(data);
15631da177e4SLinus Torvalds nodata:
15641da177e4SLinus Torvalds 	return -ENOMEM;
15651da177e4SLinus Torvalds }
1566b4ac530fSDavid S. Miller EXPORT_SYMBOL(pskb_expand_head);
15671da177e4SLinus Torvalds 
15681da177e4SLinus Torvalds /* Make private copy of skb with writable head and some headroom */
15691da177e4SLinus Torvalds 
15701da177e4SLinus Torvalds struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
15711da177e4SLinus Torvalds {
15721da177e4SLinus Torvalds 	struct sk_buff *skb2;
15731da177e4SLinus Torvalds 	int delta = headroom - skb_headroom(skb);
15741da177e4SLinus Torvalds 
15751da177e4SLinus Torvalds 	if (delta <= 0)
15761da177e4SLinus Torvalds 		skb2 = pskb_copy(skb, GFP_ATOMIC);
15771da177e4SLinus Torvalds 	else {
15781da177e4SLinus Torvalds 		skb2 = skb_clone(skb, GFP_ATOMIC);
15791da177e4SLinus Torvalds 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
15801da177e4SLinus Torvalds 					     GFP_ATOMIC)) {
15811da177e4SLinus Torvalds 			kfree_skb(skb2);
15821da177e4SLinus Torvalds 			skb2 = NULL;
15831da177e4SLinus Torvalds 		}
15841da177e4SLinus Torvalds 	}
15851da177e4SLinus Torvalds 	return skb2;
15861da177e4SLinus Torvalds }
1587b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_realloc_headroom);
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds /**
15901da177e4SLinus Torvalds  *	skb_copy_expand	-	copy and expand sk_buff
15911da177e4SLinus Torvalds  *	@skb: buffer to copy
15921da177e4SLinus Torvalds  *	@newheadroom: new free bytes at head
15931da177e4SLinus Torvalds  *	@newtailroom: new free bytes at tail
15941da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
15951da177e4SLinus Torvalds  *
15961da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and its data and while doing so
15971da177e4SLinus Torvalds  *	allocate additional space.
15981da177e4SLinus Torvalds  *
15991da177e4SLinus Torvalds  *	This is used when the caller wishes to modify the data and needs a
16001da177e4SLinus Torvalds  *	private copy of the data to alter as well as more space for new fields.
16011da177e4SLinus Torvalds  *	Returns %NULL on failure or the pointer to the buffer
16021da177e4SLinus Torvalds  *	on success. The returned buffer has a reference count of 1.
16031da177e4SLinus Torvalds  *
16041da177e4SLinus Torvalds  *	You must pass %GFP_ATOMIC as the allocation priority if this function
16051da177e4SLinus Torvalds  *	is called from an interrupt.
16061da177e4SLinus Torvalds  */
16071da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
160886a76cafSVictor Fusco 				int newheadroom, int newtailroom,
1609dd0fc66fSAl Viro 				gfp_t gfp_mask)
16101da177e4SLinus Torvalds {
16111da177e4SLinus Torvalds 	/*
16121da177e4SLinus Torvalds 	 *	Allocate the copy buffer
16131da177e4SLinus Torvalds 	 */
1614c93bdd0eSMel Gorman 	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1615c93bdd0eSMel Gorman 					gfp_mask, skb_alloc_rx_flag(skb),
1616c93bdd0eSMel Gorman 					NUMA_NO_NODE);
1617efd1e8d5SPatrick McHardy 	int oldheadroom = skb_headroom(skb);
16181da177e4SLinus Torvalds 	int head_copy_len, head_copy_off;
16191da177e4SLinus Torvalds 
16201da177e4SLinus Torvalds 	if (!n)
16211da177e4SLinus Torvalds 		return NULL;
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 	skb_reserve(n, newheadroom);
16241da177e4SLinus Torvalds 
16251da177e4SLinus Torvalds 	/* Set the tail pointer and length */
16261da177e4SLinus Torvalds 	skb_put(n, skb->len);
16271da177e4SLinus Torvalds 
1628efd1e8d5SPatrick McHardy 	head_copy_len = oldheadroom;
16291da177e4SLinus Torvalds 	head_copy_off = 0;
16301da177e4SLinus Torvalds 	if (newheadroom <= head_copy_len)
16311da177e4SLinus Torvalds 		head_copy_len = newheadroom;
16321da177e4SLinus Torvalds 	else
16331da177e4SLinus Torvalds 		head_copy_off = newheadroom - head_copy_len;
16341da177e4SLinus Torvalds 
16351da177e4SLinus Torvalds 	/* Copy the linear header and data. */
16369f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
16379f77fad3STim Hansen 			     skb->len + head_copy_len));
16381da177e4SLinus Torvalds 
163908303c18SIlya Lesokhin 	skb_copy_header(n, skb);
16401da177e4SLinus Torvalds 
1641030737bcSEric Dumazet 	skb_headers_offset_update(n, newheadroom - oldheadroom);
1642efd1e8d5SPatrick McHardy 
16431da177e4SLinus Torvalds 	return n;
16441da177e4SLinus Torvalds }
1645b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_expand);
16461da177e4SLinus Torvalds 
16471da177e4SLinus Torvalds /**
1648cd0a137aSFlorian Fainelli  *	__skb_pad		-	zero pad the tail of an skb
16491da177e4SLinus Torvalds  *	@skb: buffer to pad
16501da177e4SLinus Torvalds  *	@pad: space to pad
1651cd0a137aSFlorian Fainelli  *	@free_on_error: free buffer on error
16521da177e4SLinus Torvalds  *
16531da177e4SLinus Torvalds  *	Ensure that a buffer is followed by a padding area that is zero
16541da177e4SLinus Torvalds  *	filled. Used by network drivers which may DMA or transfer data
16551da177e4SLinus Torvalds  *	beyond the buffer end onto the wire.
16561da177e4SLinus Torvalds  *
1657cd0a137aSFlorian Fainelli  *	May return error in out of memory cases. The skb is freed on error
1658cd0a137aSFlorian Fainelli  *	if @free_on_error is true.
16591da177e4SLinus Torvalds  */
16601da177e4SLinus Torvalds 
1661cd0a137aSFlorian Fainelli int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
16621da177e4SLinus Torvalds {
16635b057c6bSHerbert Xu 	int err;
16645b057c6bSHerbert Xu 	int ntail;
16651da177e4SLinus Torvalds 
16661da177e4SLinus Torvalds 	/* If the skbuff is non linear tailroom is always zero.. */
16675b057c6bSHerbert Xu 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
16681da177e4SLinus Torvalds 		memset(skb->data+skb->len, 0, pad);
16695b057c6bSHerbert Xu 		return 0;
16701da177e4SLinus Torvalds 	}
16711da177e4SLinus Torvalds 
16724305b541SArnaldo Carvalho de Melo 	ntail = skb->data_len + pad - (skb->end - skb->tail);
16735b057c6bSHerbert Xu 	if (likely(skb_cloned(skb) || ntail > 0)) {
16745b057c6bSHerbert Xu 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
16755b057c6bSHerbert Xu 		if (unlikely(err))
16765b057c6bSHerbert Xu 			goto free_skb;
16775b057c6bSHerbert Xu 	}
16785b057c6bSHerbert Xu 
16795b057c6bSHerbert Xu 	/* FIXME: The use of this function with non-linear skb's really needs
16805b057c6bSHerbert Xu 	 * to be audited.
16815b057c6bSHerbert Xu 	 */
16825b057c6bSHerbert Xu 	err = skb_linearize(skb);
16835b057c6bSHerbert Xu 	if (unlikely(err))
16845b057c6bSHerbert Xu 		goto free_skb;
16855b057c6bSHerbert Xu 
16865b057c6bSHerbert Xu 	memset(skb->data + skb->len, 0, pad);
16875b057c6bSHerbert Xu 	return 0;
16885b057c6bSHerbert Xu 
16895b057c6bSHerbert Xu free_skb:
1690cd0a137aSFlorian Fainelli 	if (free_on_error)
16911da177e4SLinus Torvalds 		kfree_skb(skb);
16925b057c6bSHerbert Xu 	return err;
16931da177e4SLinus Torvalds }
1694cd0a137aSFlorian Fainelli EXPORT_SYMBOL(__skb_pad);
16951da177e4SLinus Torvalds 
16960dde3e16SIlpo Järvinen /**
16970c7ddf36SMathias Krause  *	pskb_put - add data to the tail of a potentially fragmented buffer
16980c7ddf36SMathias Krause  *	@skb: start of the buffer to use
16990c7ddf36SMathias Krause  *	@tail: tail fragment of the buffer to use
17000c7ddf36SMathias Krause  *	@len: amount of data to add
17010c7ddf36SMathias Krause  *
17020c7ddf36SMathias Krause  *	This function extends the used data area of the potentially
17030c7ddf36SMathias Krause  *	fragmented buffer. @tail must be the last fragment of @skb -- or
17040c7ddf36SMathias Krause  *	@skb itself. If this would exceed the total buffer size the kernel
17050c7ddf36SMathias Krause  *	will panic. A pointer to the first byte of the extra data is
17060c7ddf36SMathias Krause  *	returned.
17070c7ddf36SMathias Krause  */
17080c7ddf36SMathias Krause 
17094df864c1SJohannes Berg void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
17100c7ddf36SMathias Krause {
17110c7ddf36SMathias Krause 	if (tail != skb) {
17120c7ddf36SMathias Krause 		skb->data_len += len;
17130c7ddf36SMathias Krause 		skb->len += len;
17140c7ddf36SMathias Krause 	}
17150c7ddf36SMathias Krause 	return skb_put(tail, len);
17160c7ddf36SMathias Krause }
17170c7ddf36SMathias Krause EXPORT_SYMBOL_GPL(pskb_put);
17180c7ddf36SMathias Krause 
17190c7ddf36SMathias Krause /**
17200dde3e16SIlpo Järvinen  *	skb_put - add data to a buffer
17210dde3e16SIlpo Järvinen  *	@skb: buffer to use
17220dde3e16SIlpo Järvinen  *	@len: amount of data to add
17230dde3e16SIlpo Järvinen  *
17240dde3e16SIlpo Järvinen  *	This function extends the used data area of the buffer. If this would
17250dde3e16SIlpo Järvinen  *	exceed the total buffer size the kernel will panic. A pointer to the
17260dde3e16SIlpo Järvinen  *	first byte of the extra data is returned.
17270dde3e16SIlpo Järvinen  */
17284df864c1SJohannes Berg void *skb_put(struct sk_buff *skb, unsigned int len)
17290dde3e16SIlpo Järvinen {
17304df864c1SJohannes Berg 	void *tmp = skb_tail_pointer(skb);
17310dde3e16SIlpo Järvinen 	SKB_LINEAR_ASSERT(skb);
17320dde3e16SIlpo Järvinen 	skb->tail += len;
17330dde3e16SIlpo Järvinen 	skb->len  += len;
17340dde3e16SIlpo Järvinen 	if (unlikely(skb->tail > skb->end))
17350dde3e16SIlpo Järvinen 		skb_over_panic(skb, len, __builtin_return_address(0));
17360dde3e16SIlpo Järvinen 	return tmp;
17370dde3e16SIlpo Järvinen }
17380dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put);
17390dde3e16SIlpo Järvinen 
17406be8ac2fSIlpo Järvinen /**
1741c2aa270aSIlpo Järvinen  *	skb_push - add data to the start of a buffer
1742c2aa270aSIlpo Järvinen  *	@skb: buffer to use
1743c2aa270aSIlpo Järvinen  *	@len: amount of data to add
1744c2aa270aSIlpo Järvinen  *
1745c2aa270aSIlpo Järvinen  *	This function extends the used data area of the buffer at the buffer
1746c2aa270aSIlpo Järvinen  *	start. If this would exceed the total buffer headroom the kernel will
1747c2aa270aSIlpo Järvinen  *	panic. A pointer to the first byte of the extra data is returned.
1748c2aa270aSIlpo Järvinen  */
1749d58ff351SJohannes Berg void *skb_push(struct sk_buff *skb, unsigned int len)
1750c2aa270aSIlpo Järvinen {
1751c2aa270aSIlpo Järvinen 	skb->data -= len;
1752c2aa270aSIlpo Järvinen 	skb->len  += len;
1753c2aa270aSIlpo Järvinen 	if (unlikely(skb->data < skb->head))
1754c2aa270aSIlpo Järvinen 		skb_under_panic(skb, len, __builtin_return_address(0));
1755c2aa270aSIlpo Järvinen 	return skb->data;
1756c2aa270aSIlpo Järvinen }
1757c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push);
1758c2aa270aSIlpo Järvinen 
1759c2aa270aSIlpo Järvinen /**
17606be8ac2fSIlpo Järvinen  *	skb_pull - remove data from the start of a buffer
17616be8ac2fSIlpo Järvinen  *	@skb: buffer to use
17626be8ac2fSIlpo Järvinen  *	@len: amount of data to remove
17636be8ac2fSIlpo Järvinen  *
17646be8ac2fSIlpo Järvinen  *	This function removes data from the start of a buffer, returning
17656be8ac2fSIlpo Järvinen  *	the memory to the headroom. A pointer to the next data in the buffer
17666be8ac2fSIlpo Järvinen  *	is returned. Once the data has been pulled future pushes will overwrite
17676be8ac2fSIlpo Järvinen  *	the old data.
17686be8ac2fSIlpo Järvinen  */
1769af72868bSJohannes Berg void *skb_pull(struct sk_buff *skb, unsigned int len)
17706be8ac2fSIlpo Järvinen {
177147d29646SDavid S. Miller 	return skb_pull_inline(skb, len);
17726be8ac2fSIlpo Järvinen }
17736be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull);
17746be8ac2fSIlpo Järvinen 
1775419ae74eSIlpo Järvinen /**
1776419ae74eSIlpo Järvinen  *	skb_trim - remove end from a buffer
1777419ae74eSIlpo Järvinen  *	@skb: buffer to alter
1778419ae74eSIlpo Järvinen  *	@len: new length
1779419ae74eSIlpo Järvinen  *
1780419ae74eSIlpo Järvinen  *	Cut the length of a buffer down by removing data from the tail. If
1781419ae74eSIlpo Järvinen  *	the buffer is already under the length specified it is not modified.
1782419ae74eSIlpo Järvinen  *	The skb must be linear.
1783419ae74eSIlpo Järvinen  */
1784419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len)
1785419ae74eSIlpo Järvinen {
1786419ae74eSIlpo Järvinen 	if (skb->len > len)
1787419ae74eSIlpo Järvinen 		__skb_trim(skb, len);
1788419ae74eSIlpo Järvinen }
1789419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim);
1790419ae74eSIlpo Järvinen 
17913cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers.
17921da177e4SLinus Torvalds  */
17931da177e4SLinus Torvalds 
17943cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len)
17951da177e4SLinus Torvalds {
179627b437c8SHerbert Xu 	struct sk_buff **fragp;
179727b437c8SHerbert Xu 	struct sk_buff *frag;
17981da177e4SLinus Torvalds 	int offset = skb_headlen(skb);
17991da177e4SLinus Torvalds 	int nfrags = skb_shinfo(skb)->nr_frags;
18001da177e4SLinus Torvalds 	int i;
180127b437c8SHerbert Xu 	int err;
180227b437c8SHerbert Xu 
180327b437c8SHerbert Xu 	if (skb_cloned(skb) &&
180427b437c8SHerbert Xu 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
180527b437c8SHerbert Xu 		return err;
18061da177e4SLinus Torvalds 
1807f4d26fb3SHerbert Xu 	i = 0;
1808f4d26fb3SHerbert Xu 	if (offset >= len)
1809f4d26fb3SHerbert Xu 		goto drop_pages;
1810f4d26fb3SHerbert Xu 
1811f4d26fb3SHerbert Xu 	for (; i < nfrags; i++) {
18129e903e08SEric Dumazet 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
181327b437c8SHerbert Xu 
181427b437c8SHerbert Xu 		if (end < len) {
18151da177e4SLinus Torvalds 			offset = end;
181627b437c8SHerbert Xu 			continue;
18171da177e4SLinus Torvalds 		}
18181da177e4SLinus Torvalds 
18199e903e08SEric Dumazet 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
182027b437c8SHerbert Xu 
1821f4d26fb3SHerbert Xu drop_pages:
182227b437c8SHerbert Xu 		skb_shinfo(skb)->nr_frags = i;
182327b437c8SHerbert Xu 
182427b437c8SHerbert Xu 		for (; i < nfrags; i++)
1825ea2ab693SIan Campbell 			skb_frag_unref(skb, i);
182627b437c8SHerbert Xu 
182721dc3301SDavid S. Miller 		if (skb_has_frag_list(skb))
182827b437c8SHerbert Xu 			skb_drop_fraglist(skb);
1829f4d26fb3SHerbert Xu 		goto done;
183027b437c8SHerbert Xu 	}
183127b437c8SHerbert Xu 
183227b437c8SHerbert Xu 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
183327b437c8SHerbert Xu 	     fragp = &frag->next) {
183427b437c8SHerbert Xu 		int end = offset + frag->len;
183527b437c8SHerbert Xu 
183627b437c8SHerbert Xu 		if (skb_shared(frag)) {
183727b437c8SHerbert Xu 			struct sk_buff *nfrag;
183827b437c8SHerbert Xu 
183927b437c8SHerbert Xu 			nfrag = skb_clone(frag, GFP_ATOMIC);
184027b437c8SHerbert Xu 			if (unlikely(!nfrag))
184127b437c8SHerbert Xu 				return -ENOMEM;
184227b437c8SHerbert Xu 
184327b437c8SHerbert Xu 			nfrag->next = frag->next;
184485bb2a60SEric Dumazet 			consume_skb(frag);
184527b437c8SHerbert Xu 			frag = nfrag;
184627b437c8SHerbert Xu 			*fragp = frag;
184727b437c8SHerbert Xu 		}
184827b437c8SHerbert Xu 
184927b437c8SHerbert Xu 		if (end < len) {
185027b437c8SHerbert Xu 			offset = end;
185127b437c8SHerbert Xu 			continue;
185227b437c8SHerbert Xu 		}
185327b437c8SHerbert Xu 
185427b437c8SHerbert Xu 		if (end > len &&
185527b437c8SHerbert Xu 		    unlikely((err = pskb_trim(frag, len - offset))))
185627b437c8SHerbert Xu 			return err;
185727b437c8SHerbert Xu 
185827b437c8SHerbert Xu 		if (frag->next)
185927b437c8SHerbert Xu 			skb_drop_list(&frag->next);
186027b437c8SHerbert Xu 		break;
186127b437c8SHerbert Xu 	}
186227b437c8SHerbert Xu 
1863f4d26fb3SHerbert Xu done:
186427b437c8SHerbert Xu 	if (len > skb_headlen(skb)) {
18651da177e4SLinus Torvalds 		skb->data_len -= skb->len - len;
18661da177e4SLinus Torvalds 		skb->len       = len;
18671da177e4SLinus Torvalds 	} else {
18681da177e4SLinus Torvalds 		skb->len       = len;
18691da177e4SLinus Torvalds 		skb->data_len  = 0;
187027a884dcSArnaldo Carvalho de Melo 		skb_set_tail_pointer(skb, len);
18711da177e4SLinus Torvalds 	}
18721da177e4SLinus Torvalds 
1873c21b48ccSEric Dumazet 	if (!skb->sk || skb->destructor == sock_edemux)
1874c21b48ccSEric Dumazet 		skb_condense(skb);
18751da177e4SLinus Torvalds 	return 0;
18761da177e4SLinus Torvalds }
1877b4ac530fSDavid S. Miller EXPORT_SYMBOL(___pskb_trim);
18781da177e4SLinus Torvalds 
187988078d98SEric Dumazet /* Note : use pskb_trim_rcsum() instead of calling this directly
188088078d98SEric Dumazet  */
188188078d98SEric Dumazet int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
188288078d98SEric Dumazet {
188388078d98SEric Dumazet 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
188488078d98SEric Dumazet 		int delta = skb->len - len;
188588078d98SEric Dumazet 
1886d55bef50SDimitris Michailidis 		skb->csum = csum_block_sub(skb->csum,
1887d55bef50SDimitris Michailidis 					   skb_checksum(skb, len, delta, 0),
1888d55bef50SDimitris Michailidis 					   len);
188988078d98SEric Dumazet 	}
189088078d98SEric Dumazet 	return __pskb_trim(skb, len);
189188078d98SEric Dumazet }
189288078d98SEric Dumazet EXPORT_SYMBOL(pskb_trim_rcsum_slow);
189388078d98SEric Dumazet 
18941da177e4SLinus Torvalds /**
18951da177e4SLinus Torvalds  *	__pskb_pull_tail - advance tail of skb header
18961da177e4SLinus Torvalds  *	@skb: buffer to reallocate
18971da177e4SLinus Torvalds  *	@delta: number of bytes to advance tail
18981da177e4SLinus Torvalds  *
18991da177e4SLinus Torvalds  *	The function makes a sense only on a fragmented &sk_buff,
19001da177e4SLinus Torvalds  *	it expands header moving its tail forward and copying necessary
19011da177e4SLinus Torvalds  *	data from fragmented part.
19021da177e4SLinus Torvalds  *
19031da177e4SLinus Torvalds  *	&sk_buff MUST have reference count of 1.
19041da177e4SLinus Torvalds  *
19051da177e4SLinus Torvalds  *	Returns %NULL (and &sk_buff does not change) if pull failed
19061da177e4SLinus Torvalds  *	or value of new tail of skb in the case of success.
19071da177e4SLinus Torvalds  *
19081da177e4SLinus Torvalds  *	All the pointers pointing into skb header may change and must be
19091da177e4SLinus Torvalds  *	reloaded after call to this function.
19101da177e4SLinus Torvalds  */
19111da177e4SLinus Torvalds 
19121da177e4SLinus Torvalds /* Moves tail of skb head forward, copying data from fragmented part,
19131da177e4SLinus Torvalds  * when it is necessary.
19141da177e4SLinus Torvalds  * 1. It may fail due to malloc failure.
19151da177e4SLinus Torvalds  * 2. It may change skb pointers.
19161da177e4SLinus Torvalds  *
19171da177e4SLinus Torvalds  * It is pretty complicated. Luckily, it is called only in exceptional cases.
19181da177e4SLinus Torvalds  */
1919af72868bSJohannes Berg void *__pskb_pull_tail(struct sk_buff *skb, int delta)
19201da177e4SLinus Torvalds {
19211da177e4SLinus Torvalds 	/* If skb has not enough free space at tail, get new one
19221da177e4SLinus Torvalds 	 * plus 128 bytes for future expansions. If we have enough
19231da177e4SLinus Torvalds 	 * room at tail, reallocate without expansion only if skb is cloned.
19241da177e4SLinus Torvalds 	 */
19254305b541SArnaldo Carvalho de Melo 	int i, k, eat = (skb->tail + delta) - skb->end;
19261da177e4SLinus Torvalds 
19271da177e4SLinus Torvalds 	if (eat > 0 || skb_cloned(skb)) {
19281da177e4SLinus Torvalds 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
19291da177e4SLinus Torvalds 				     GFP_ATOMIC))
19301da177e4SLinus Torvalds 			return NULL;
19311da177e4SLinus Torvalds 	}
19321da177e4SLinus Torvalds 
19339f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
19349f77fad3STim Hansen 			     skb_tail_pointer(skb), delta));
19351da177e4SLinus Torvalds 
19361da177e4SLinus Torvalds 	/* Optimization: no fragments, no reasons to preestimate
19371da177e4SLinus Torvalds 	 * size of pulled pages. Superb.
19381da177e4SLinus Torvalds 	 */
193921dc3301SDavid S. Miller 	if (!skb_has_frag_list(skb))
19401da177e4SLinus Torvalds 		goto pull_pages;
19411da177e4SLinus Torvalds 
19421da177e4SLinus Torvalds 	/* Estimate size of pulled pages. */
19431da177e4SLinus Torvalds 	eat = delta;
19441da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
19459e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
19469e903e08SEric Dumazet 
19479e903e08SEric Dumazet 		if (size >= eat)
19481da177e4SLinus Torvalds 			goto pull_pages;
19499e903e08SEric Dumazet 		eat -= size;
19501da177e4SLinus Torvalds 	}
19511da177e4SLinus Torvalds 
19521da177e4SLinus Torvalds 	/* If we need update frag list, we are in troubles.
195309001b03SWenhua Shi 	 * Certainly, it is possible to add an offset to skb data,
19541da177e4SLinus Torvalds 	 * but taking into account that pulling is expected to
19551da177e4SLinus Torvalds 	 * be very rare operation, it is worth to fight against
19561da177e4SLinus Torvalds 	 * further bloating skb head and crucify ourselves here instead.
19571da177e4SLinus Torvalds 	 * Pure masohism, indeed. 8)8)
19581da177e4SLinus Torvalds 	 */
19591da177e4SLinus Torvalds 	if (eat) {
19601da177e4SLinus Torvalds 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
19611da177e4SLinus Torvalds 		struct sk_buff *clone = NULL;
19621da177e4SLinus Torvalds 		struct sk_buff *insp = NULL;
19631da177e4SLinus Torvalds 
19641da177e4SLinus Torvalds 		do {
19651da177e4SLinus Torvalds 			if (list->len <= eat) {
19661da177e4SLinus Torvalds 				/* Eaten as whole. */
19671da177e4SLinus Torvalds 				eat -= list->len;
19681da177e4SLinus Torvalds 				list = list->next;
19691da177e4SLinus Torvalds 				insp = list;
19701da177e4SLinus Torvalds 			} else {
19711da177e4SLinus Torvalds 				/* Eaten partially. */
19721da177e4SLinus Torvalds 
19731da177e4SLinus Torvalds 				if (skb_shared(list)) {
19741da177e4SLinus Torvalds 					/* Sucks! We need to fork list. :-( */
19751da177e4SLinus Torvalds 					clone = skb_clone(list, GFP_ATOMIC);
19761da177e4SLinus Torvalds 					if (!clone)
19771da177e4SLinus Torvalds 						return NULL;
19781da177e4SLinus Torvalds 					insp = list->next;
19791da177e4SLinus Torvalds 					list = clone;
19801da177e4SLinus Torvalds 				} else {
19811da177e4SLinus Torvalds 					/* This may be pulled without
19821da177e4SLinus Torvalds 					 * problems. */
19831da177e4SLinus Torvalds 					insp = list;
19841da177e4SLinus Torvalds 				}
19851da177e4SLinus Torvalds 				if (!pskb_pull(list, eat)) {
19861da177e4SLinus Torvalds 					kfree_skb(clone);
19871da177e4SLinus Torvalds 					return NULL;
19881da177e4SLinus Torvalds 				}
19891da177e4SLinus Torvalds 				break;
19901da177e4SLinus Torvalds 			}
19911da177e4SLinus Torvalds 		} while (eat);
19921da177e4SLinus Torvalds 
19931da177e4SLinus Torvalds 		/* Free pulled out fragments. */
19941da177e4SLinus Torvalds 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
19951da177e4SLinus Torvalds 			skb_shinfo(skb)->frag_list = list->next;
19961da177e4SLinus Torvalds 			kfree_skb(list);
19971da177e4SLinus Torvalds 		}
19981da177e4SLinus Torvalds 		/* And insert new clone at head. */
19991da177e4SLinus Torvalds 		if (clone) {
20001da177e4SLinus Torvalds 			clone->next = list;
20011da177e4SLinus Torvalds 			skb_shinfo(skb)->frag_list = clone;
20021da177e4SLinus Torvalds 		}
20031da177e4SLinus Torvalds 	}
20041da177e4SLinus Torvalds 	/* Success! Now we may commit changes to skb data. */
20051da177e4SLinus Torvalds 
20061da177e4SLinus Torvalds pull_pages:
20071da177e4SLinus Torvalds 	eat = delta;
20081da177e4SLinus Torvalds 	k = 0;
20091da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
20109e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
20119e903e08SEric Dumazet 
20129e903e08SEric Dumazet 		if (size <= eat) {
2013ea2ab693SIan Campbell 			skb_frag_unref(skb, i);
20149e903e08SEric Dumazet 			eat -= size;
20151da177e4SLinus Torvalds 		} else {
20161da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
20171da177e4SLinus Torvalds 			if (eat) {
20181da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].page_offset += eat;
20199e903e08SEric Dumazet 				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
20203ccc6c6fSlinzhang 				if (!i)
20213ccc6c6fSlinzhang 					goto end;
20221da177e4SLinus Torvalds 				eat = 0;
20231da177e4SLinus Torvalds 			}
20241da177e4SLinus Torvalds 			k++;
20251da177e4SLinus Torvalds 		}
20261da177e4SLinus Torvalds 	}
20271da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
20281da177e4SLinus Torvalds 
20293ccc6c6fSlinzhang end:
20301da177e4SLinus Torvalds 	skb->tail     += delta;
20311da177e4SLinus Torvalds 	skb->data_len -= delta;
20321da177e4SLinus Torvalds 
20331f8b977aSWillem de Bruijn 	if (!skb->data_len)
20341f8b977aSWillem de Bruijn 		skb_zcopy_clear(skb, false);
20351f8b977aSWillem de Bruijn 
203627a884dcSArnaldo Carvalho de Melo 	return skb_tail_pointer(skb);
20371da177e4SLinus Torvalds }
2038b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail);
20391da177e4SLinus Torvalds 
204022019b17SEric Dumazet /**
204122019b17SEric Dumazet  *	skb_copy_bits - copy bits from skb to kernel buffer
204222019b17SEric Dumazet  *	@skb: source skb
204322019b17SEric Dumazet  *	@offset: offset in source
204422019b17SEric Dumazet  *	@to: destination buffer
204522019b17SEric Dumazet  *	@len: number of bytes to copy
204622019b17SEric Dumazet  *
204722019b17SEric Dumazet  *	Copy the specified number of bytes from the source skb to the
204822019b17SEric Dumazet  *	destination buffer.
204922019b17SEric Dumazet  *
205022019b17SEric Dumazet  *	CAUTION ! :
205122019b17SEric Dumazet  *		If its prototype is ever changed,
205222019b17SEric Dumazet  *		check arch/{*}/net/{*}.S files,
205322019b17SEric Dumazet  *		since it is called from BPF assembly code.
205422019b17SEric Dumazet  */
20551da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
20561da177e4SLinus Torvalds {
20571a028e50SDavid S. Miller 	int start = skb_headlen(skb);
2058fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
2059fbb398a8SDavid S. Miller 	int i, copy;
20601da177e4SLinus Torvalds 
20611da177e4SLinus Torvalds 	if (offset > (int)skb->len - len)
20621da177e4SLinus Torvalds 		goto fault;
20631da177e4SLinus Torvalds 
20641da177e4SLinus Torvalds 	/* Copy header. */
20651a028e50SDavid S. Miller 	if ((copy = start - offset) > 0) {
20661da177e4SLinus Torvalds 		if (copy > len)
20671da177e4SLinus Torvalds 			copy = len;
2068d626f62bSArnaldo Carvalho de Melo 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
20691da177e4SLinus Torvalds 		if ((len -= copy) == 0)
20701da177e4SLinus Torvalds 			return 0;
20711da177e4SLinus Torvalds 		offset += copy;
20721da177e4SLinus Torvalds 		to     += copy;
20731da177e4SLinus Torvalds 	}
20741da177e4SLinus Torvalds 
20751da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
20761a028e50SDavid S. Miller 		int end;
207751c56b00SEric Dumazet 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
20781da177e4SLinus Torvalds 
2079547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
20801a028e50SDavid S. Miller 
208151c56b00SEric Dumazet 		end = start + skb_frag_size(f);
20821da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2083c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2084c613c209SWillem de Bruijn 			struct page *p;
20851da177e4SLinus Torvalds 			u8 *vaddr;
20861da177e4SLinus Torvalds 
20871da177e4SLinus Torvalds 			if (copy > len)
20881da177e4SLinus Torvalds 				copy = len;
20891da177e4SLinus Torvalds 
2090c613c209SWillem de Bruijn 			skb_frag_foreach_page(f,
2091c613c209SWillem de Bruijn 					      f->page_offset + offset - start,
2092c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2093c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2094c613c209SWillem de Bruijn 				memcpy(to + copied, vaddr + p_off, p_len);
209551c56b00SEric Dumazet 				kunmap_atomic(vaddr);
2096c613c209SWillem de Bruijn 			}
20971da177e4SLinus Torvalds 
20981da177e4SLinus Torvalds 			if ((len -= copy) == 0)
20991da177e4SLinus Torvalds 				return 0;
21001da177e4SLinus Torvalds 			offset += copy;
21011da177e4SLinus Torvalds 			to     += copy;
21021da177e4SLinus Torvalds 		}
21031a028e50SDavid S. Miller 		start = end;
21041da177e4SLinus Torvalds 	}
21051da177e4SLinus Torvalds 
2106fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
21071a028e50SDavid S. Miller 		int end;
21081da177e4SLinus Torvalds 
2109547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
21101a028e50SDavid S. Miller 
2111fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
21121da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
21131da177e4SLinus Torvalds 			if (copy > len)
21141da177e4SLinus Torvalds 				copy = len;
2115fbb398a8SDavid S. Miller 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
21161da177e4SLinus Torvalds 				goto fault;
21171da177e4SLinus Torvalds 			if ((len -= copy) == 0)
21181da177e4SLinus Torvalds 				return 0;
21191da177e4SLinus Torvalds 			offset += copy;
21201da177e4SLinus Torvalds 			to     += copy;
21211da177e4SLinus Torvalds 		}
21221a028e50SDavid S. Miller 		start = end;
21231da177e4SLinus Torvalds 	}
2124a6686f2fSShirley Ma 
21251da177e4SLinus Torvalds 	if (!len)
21261da177e4SLinus Torvalds 		return 0;
21271da177e4SLinus Torvalds 
21281da177e4SLinus Torvalds fault:
21291da177e4SLinus Torvalds 	return -EFAULT;
21301da177e4SLinus Torvalds }
2131b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits);
21321da177e4SLinus Torvalds 
21339c55e01cSJens Axboe /*
21349c55e01cSJens Axboe  * Callback from splice_to_pipe(), if we need to release some pages
21359c55e01cSJens Axboe  * at the end of the spd in case we error'ed out in filling the pipe.
21369c55e01cSJens Axboe  */
21379c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
21389c55e01cSJens Axboe {
21398b9d3728SJarek Poplawski 	put_page(spd->pages[i]);
21408b9d3728SJarek Poplawski }
21419c55e01cSJens Axboe 
2142a108d5f3SDavid S. Miller static struct page *linear_to_page(struct page *page, unsigned int *len,
21434fb66994SJarek Poplawski 				   unsigned int *offset,
214418aafc62SEric Dumazet 				   struct sock *sk)
21458b9d3728SJarek Poplawski {
21465640f768SEric Dumazet 	struct page_frag *pfrag = sk_page_frag(sk);
21478b9d3728SJarek Poplawski 
21485640f768SEric Dumazet 	if (!sk_page_frag_refill(sk, pfrag))
21498b9d3728SJarek Poplawski 		return NULL;
21504fb66994SJarek Poplawski 
21515640f768SEric Dumazet 	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
21524fb66994SJarek Poplawski 
21535640f768SEric Dumazet 	memcpy(page_address(pfrag->page) + pfrag->offset,
21545640f768SEric Dumazet 	       page_address(page) + *offset, *len);
21555640f768SEric Dumazet 	*offset = pfrag->offset;
21565640f768SEric Dumazet 	pfrag->offset += *len;
21574fb66994SJarek Poplawski 
21585640f768SEric Dumazet 	return pfrag->page;
21599c55e01cSJens Axboe }
21609c55e01cSJens Axboe 
216141c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
216241c73a0dSEric Dumazet 			     struct page *page,
216341c73a0dSEric Dumazet 			     unsigned int offset)
216441c73a0dSEric Dumazet {
216541c73a0dSEric Dumazet 	return	spd->nr_pages &&
216641c73a0dSEric Dumazet 		spd->pages[spd->nr_pages - 1] == page &&
216741c73a0dSEric Dumazet 		(spd->partial[spd->nr_pages - 1].offset +
216841c73a0dSEric Dumazet 		 spd->partial[spd->nr_pages - 1].len == offset);
216941c73a0dSEric Dumazet }
217041c73a0dSEric Dumazet 
21719c55e01cSJens Axboe /*
21729c55e01cSJens Axboe  * Fill page/offset/length into spd, if it can hold more pages.
21739c55e01cSJens Axboe  */
2174a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd,
217535f3d14dSJens Axboe 			  struct pipe_inode_info *pipe, struct page *page,
21764fb66994SJarek Poplawski 			  unsigned int *len, unsigned int offset,
217718aafc62SEric Dumazet 			  bool linear,
21787a67e56fSJarek Poplawski 			  struct sock *sk)
21799c55e01cSJens Axboe {
218041c73a0dSEric Dumazet 	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2181a108d5f3SDavid S. Miller 		return true;
21829c55e01cSJens Axboe 
21838b9d3728SJarek Poplawski 	if (linear) {
218418aafc62SEric Dumazet 		page = linear_to_page(page, len, &offset, sk);
21858b9d3728SJarek Poplawski 		if (!page)
2186a108d5f3SDavid S. Miller 			return true;
218741c73a0dSEric Dumazet 	}
218841c73a0dSEric Dumazet 	if (spd_can_coalesce(spd, page, offset)) {
218941c73a0dSEric Dumazet 		spd->partial[spd->nr_pages - 1].len += *len;
2190a108d5f3SDavid S. Miller 		return false;
219141c73a0dSEric Dumazet 	}
21928b9d3728SJarek Poplawski 	get_page(page);
21939c55e01cSJens Axboe 	spd->pages[spd->nr_pages] = page;
21944fb66994SJarek Poplawski 	spd->partial[spd->nr_pages].len = *len;
21959c55e01cSJens Axboe 	spd->partial[spd->nr_pages].offset = offset;
21969c55e01cSJens Axboe 	spd->nr_pages++;
21978b9d3728SJarek Poplawski 
2198a108d5f3SDavid S. Miller 	return false;
21999c55e01cSJens Axboe }
22009c55e01cSJens Axboe 
2201a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff,
22022870c43dSOctavian Purdila 			     unsigned int plen, unsigned int *off,
220318aafc62SEric Dumazet 			     unsigned int *len,
2204d7ccf7c0SEric Dumazet 			     struct splice_pipe_desc *spd, bool linear,
220535f3d14dSJens Axboe 			     struct sock *sk,
220635f3d14dSJens Axboe 			     struct pipe_inode_info *pipe)
22079c55e01cSJens Axboe {
22082870c43dSOctavian Purdila 	if (!*len)
2209a108d5f3SDavid S. Miller 		return true;
22109c55e01cSJens Axboe 
22112870c43dSOctavian Purdila 	/* skip this segment if already processed */
22122870c43dSOctavian Purdila 	if (*off >= plen) {
22132870c43dSOctavian Purdila 		*off -= plen;
2214a108d5f3SDavid S. Miller 		return false;
22152870c43dSOctavian Purdila 	}
22162870c43dSOctavian Purdila 
22172870c43dSOctavian Purdila 	/* ignore any bits we already processed */
22189ca1b22dSEric Dumazet 	poff += *off;
22199ca1b22dSEric Dumazet 	plen -= *off;
22202870c43dSOctavian Purdila 	*off = 0;
22212870c43dSOctavian Purdila 
222218aafc62SEric Dumazet 	do {
222318aafc62SEric Dumazet 		unsigned int flen = min(*len, plen);
22242870c43dSOctavian Purdila 
222518aafc62SEric Dumazet 		if (spd_fill_page(spd, pipe, page, &flen, poff,
222618aafc62SEric Dumazet 				  linear, sk))
2227a108d5f3SDavid S. Miller 			return true;
222818aafc62SEric Dumazet 		poff += flen;
222918aafc62SEric Dumazet 		plen -= flen;
22302870c43dSOctavian Purdila 		*len -= flen;
223118aafc62SEric Dumazet 	} while (*len && plen);
22322870c43dSOctavian Purdila 
2233a108d5f3SDavid S. Miller 	return false;
2234db43a282SOctavian Purdila }
22359c55e01cSJens Axboe 
22369c55e01cSJens Axboe /*
2237a108d5f3SDavid S. Miller  * Map linear and fragment data from the skb to spd. It reports true if the
22382870c43dSOctavian Purdila  * pipe is full or if we already spliced the requested length.
22399c55e01cSJens Axboe  */
2240a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
224135f3d14dSJens Axboe 			      unsigned int *offset, unsigned int *len,
224235f3d14dSJens Axboe 			      struct splice_pipe_desc *spd, struct sock *sk)
22432870c43dSOctavian Purdila {
22442870c43dSOctavian Purdila 	int seg;
2245fa9835e5STom Herbert 	struct sk_buff *iter;
22469c55e01cSJens Axboe 
22471d0c0b32SEric Dumazet 	/* map the linear part :
22482996d31fSAlexander Duyck 	 * If skb->head_frag is set, this 'linear' part is backed by a
22492996d31fSAlexander Duyck 	 * fragment, and if the head is not shared with any clones then
22502996d31fSAlexander Duyck 	 * we can avoid a copy since we own the head portion of this page.
22519c55e01cSJens Axboe 	 */
22522870c43dSOctavian Purdila 	if (__splice_segment(virt_to_page(skb->data),
22532870c43dSOctavian Purdila 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
22542870c43dSOctavian Purdila 			     skb_headlen(skb),
225518aafc62SEric Dumazet 			     offset, len, spd,
22563a7c1ee4SAlexander Duyck 			     skb_head_is_locked(skb),
22571d0c0b32SEric Dumazet 			     sk, pipe))
2258a108d5f3SDavid S. Miller 		return true;
22599c55e01cSJens Axboe 
22609c55e01cSJens Axboe 	/*
22619c55e01cSJens Axboe 	 * then map the fragments
22629c55e01cSJens Axboe 	 */
22639c55e01cSJens Axboe 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
22649c55e01cSJens Axboe 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
22659c55e01cSJens Axboe 
2266ea2ab693SIan Campbell 		if (__splice_segment(skb_frag_page(f),
22679e903e08SEric Dumazet 				     f->page_offset, skb_frag_size(f),
226818aafc62SEric Dumazet 				     offset, len, spd, false, sk, pipe))
2269a108d5f3SDavid S. Miller 			return true;
22709c55e01cSJens Axboe 	}
22719c55e01cSJens Axboe 
2272fa9835e5STom Herbert 	skb_walk_frags(skb, iter) {
2273fa9835e5STom Herbert 		if (*offset >= iter->len) {
2274fa9835e5STom Herbert 			*offset -= iter->len;
2275fa9835e5STom Herbert 			continue;
2276fa9835e5STom Herbert 		}
2277fa9835e5STom Herbert 		/* __skb_splice_bits() only fails if the output has no room
2278fa9835e5STom Herbert 		 * left, so no point in going over the frag_list for the error
2279fa9835e5STom Herbert 		 * case.
2280fa9835e5STom Herbert 		 */
2281fa9835e5STom Herbert 		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2282fa9835e5STom Herbert 			return true;
2283fa9835e5STom Herbert 	}
2284fa9835e5STom Herbert 
2285a108d5f3SDavid S. Miller 	return false;
22869c55e01cSJens Axboe }
22879c55e01cSJens Axboe 
22889c55e01cSJens Axboe /*
22899c55e01cSJens Axboe  * Map data from the skb to a pipe. Should handle both the linear part,
2290fa9835e5STom Herbert  * the fragments, and the frag list.
22919c55e01cSJens Axboe  */
2292a60e3cc7SHannes Frederic Sowa int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
22939c55e01cSJens Axboe 		    struct pipe_inode_info *pipe, unsigned int tlen,
229425869262SAl Viro 		    unsigned int flags)
22959c55e01cSJens Axboe {
229641c73a0dSEric Dumazet 	struct partial_page partial[MAX_SKB_FRAGS];
229741c73a0dSEric Dumazet 	struct page *pages[MAX_SKB_FRAGS];
22989c55e01cSJens Axboe 	struct splice_pipe_desc spd = {
22999c55e01cSJens Axboe 		.pages = pages,
23009c55e01cSJens Axboe 		.partial = partial,
2301047fe360SEric Dumazet 		.nr_pages_max = MAX_SKB_FRAGS,
230228a625cbSMiklos Szeredi 		.ops = &nosteal_pipe_buf_ops,
23039c55e01cSJens Axboe 		.spd_release = sock_spd_release,
23049c55e01cSJens Axboe 	};
230535f3d14dSJens Axboe 	int ret = 0;
230635f3d14dSJens Axboe 
2307fa9835e5STom Herbert 	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
23089c55e01cSJens Axboe 
2309a60e3cc7SHannes Frederic Sowa 	if (spd.nr_pages)
231025869262SAl Viro 		ret = splice_to_pipe(pipe, &spd);
23119c55e01cSJens Axboe 
231235f3d14dSJens Axboe 	return ret;
23139c55e01cSJens Axboe }
23142b514574SHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_splice_bits);
23159c55e01cSJens Axboe 
231620bf50deSTom Herbert /* Send skb data on a socket. Socket must be locked. */
231720bf50deSTom Herbert int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
231820bf50deSTom Herbert 			 int len)
231920bf50deSTom Herbert {
232020bf50deSTom Herbert 	unsigned int orig_len = len;
232120bf50deSTom Herbert 	struct sk_buff *head = skb;
232220bf50deSTom Herbert 	unsigned short fragidx;
232320bf50deSTom Herbert 	int slen, ret;
232420bf50deSTom Herbert 
232520bf50deSTom Herbert do_frag_list:
232620bf50deSTom Herbert 
232720bf50deSTom Herbert 	/* Deal with head data */
232820bf50deSTom Herbert 	while (offset < skb_headlen(skb) && len) {
232920bf50deSTom Herbert 		struct kvec kv;
233020bf50deSTom Herbert 		struct msghdr msg;
233120bf50deSTom Herbert 
233220bf50deSTom Herbert 		slen = min_t(int, len, skb_headlen(skb) - offset);
233320bf50deSTom Herbert 		kv.iov_base = skb->data + offset;
2334db5980d8SJohn Fastabend 		kv.iov_len = slen;
233520bf50deSTom Herbert 		memset(&msg, 0, sizeof(msg));
233620bf50deSTom Herbert 
233720bf50deSTom Herbert 		ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
233820bf50deSTom Herbert 		if (ret <= 0)
233920bf50deSTom Herbert 			goto error;
234020bf50deSTom Herbert 
234120bf50deSTom Herbert 		offset += ret;
234220bf50deSTom Herbert 		len -= ret;
234320bf50deSTom Herbert 	}
234420bf50deSTom Herbert 
234520bf50deSTom Herbert 	/* All the data was skb head? */
234620bf50deSTom Herbert 	if (!len)
234720bf50deSTom Herbert 		goto out;
234820bf50deSTom Herbert 
234920bf50deSTom Herbert 	/* Make offset relative to start of frags */
235020bf50deSTom Herbert 	offset -= skb_headlen(skb);
235120bf50deSTom Herbert 
235220bf50deSTom Herbert 	/* Find where we are in frag list */
235320bf50deSTom Herbert 	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
235420bf50deSTom Herbert 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
235520bf50deSTom Herbert 
235620bf50deSTom Herbert 		if (offset < frag->size)
235720bf50deSTom Herbert 			break;
235820bf50deSTom Herbert 
235920bf50deSTom Herbert 		offset -= frag->size;
236020bf50deSTom Herbert 	}
236120bf50deSTom Herbert 
236220bf50deSTom Herbert 	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
236320bf50deSTom Herbert 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
236420bf50deSTom Herbert 
236520bf50deSTom Herbert 		slen = min_t(size_t, len, frag->size - offset);
236620bf50deSTom Herbert 
236720bf50deSTom Herbert 		while (slen) {
236820bf50deSTom Herbert 			ret = kernel_sendpage_locked(sk, frag->page.p,
236920bf50deSTom Herbert 						     frag->page_offset + offset,
237020bf50deSTom Herbert 						     slen, MSG_DONTWAIT);
237120bf50deSTom Herbert 			if (ret <= 0)
237220bf50deSTom Herbert 				goto error;
237320bf50deSTom Herbert 
237420bf50deSTom Herbert 			len -= ret;
237520bf50deSTom Herbert 			offset += ret;
237620bf50deSTom Herbert 			slen -= ret;
237720bf50deSTom Herbert 		}
237820bf50deSTom Herbert 
237920bf50deSTom Herbert 		offset = 0;
238020bf50deSTom Herbert 	}
238120bf50deSTom Herbert 
238220bf50deSTom Herbert 	if (len) {
238320bf50deSTom Herbert 		/* Process any frag lists */
238420bf50deSTom Herbert 
238520bf50deSTom Herbert 		if (skb == head) {
238620bf50deSTom Herbert 			if (skb_has_frag_list(skb)) {
238720bf50deSTom Herbert 				skb = skb_shinfo(skb)->frag_list;
238820bf50deSTom Herbert 				goto do_frag_list;
238920bf50deSTom Herbert 			}
239020bf50deSTom Herbert 		} else if (skb->next) {
239120bf50deSTom Herbert 			skb = skb->next;
239220bf50deSTom Herbert 			goto do_frag_list;
239320bf50deSTom Herbert 		}
239420bf50deSTom Herbert 	}
239520bf50deSTom Herbert 
239620bf50deSTom Herbert out:
239720bf50deSTom Herbert 	return orig_len - len;
239820bf50deSTom Herbert 
239920bf50deSTom Herbert error:
240020bf50deSTom Herbert 	return orig_len == len ? ret : orig_len - len;
240120bf50deSTom Herbert }
240220bf50deSTom Herbert EXPORT_SYMBOL_GPL(skb_send_sock_locked);
240320bf50deSTom Herbert 
2404357b40a1SHerbert Xu /**
2405357b40a1SHerbert Xu  *	skb_store_bits - store bits from kernel buffer to skb
2406357b40a1SHerbert Xu  *	@skb: destination buffer
2407357b40a1SHerbert Xu  *	@offset: offset in destination
2408357b40a1SHerbert Xu  *	@from: source buffer
2409357b40a1SHerbert Xu  *	@len: number of bytes to copy
2410357b40a1SHerbert Xu  *
2411357b40a1SHerbert Xu  *	Copy the specified number of bytes from the source buffer to the
2412357b40a1SHerbert Xu  *	destination skb.  This function handles all the messy bits of
2413357b40a1SHerbert Xu  *	traversing fragment lists and such.
2414357b40a1SHerbert Xu  */
2415357b40a1SHerbert Xu 
24160c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2417357b40a1SHerbert Xu {
24181a028e50SDavid S. Miller 	int start = skb_headlen(skb);
2419fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
2420fbb398a8SDavid S. Miller 	int i, copy;
2421357b40a1SHerbert Xu 
2422357b40a1SHerbert Xu 	if (offset > (int)skb->len - len)
2423357b40a1SHerbert Xu 		goto fault;
2424357b40a1SHerbert Xu 
24251a028e50SDavid S. Miller 	if ((copy = start - offset) > 0) {
2426357b40a1SHerbert Xu 		if (copy > len)
2427357b40a1SHerbert Xu 			copy = len;
242827d7ff46SArnaldo Carvalho de Melo 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
2429357b40a1SHerbert Xu 		if ((len -= copy) == 0)
2430357b40a1SHerbert Xu 			return 0;
2431357b40a1SHerbert Xu 		offset += copy;
2432357b40a1SHerbert Xu 		from += copy;
2433357b40a1SHerbert Xu 	}
2434357b40a1SHerbert Xu 
2435357b40a1SHerbert Xu 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2436357b40a1SHerbert Xu 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
24371a028e50SDavid S. Miller 		int end;
2438357b40a1SHerbert Xu 
2439547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
24401a028e50SDavid S. Miller 
24419e903e08SEric Dumazet 		end = start + skb_frag_size(frag);
2442357b40a1SHerbert Xu 		if ((copy = end - offset) > 0) {
2443c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2444c613c209SWillem de Bruijn 			struct page *p;
2445357b40a1SHerbert Xu 			u8 *vaddr;
2446357b40a1SHerbert Xu 
2447357b40a1SHerbert Xu 			if (copy > len)
2448357b40a1SHerbert Xu 				copy = len;
2449357b40a1SHerbert Xu 
2450c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2451c613c209SWillem de Bruijn 					      frag->page_offset + offset - start,
2452c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2453c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2454c613c209SWillem de Bruijn 				memcpy(vaddr + p_off, from + copied, p_len);
245551c56b00SEric Dumazet 				kunmap_atomic(vaddr);
2456c613c209SWillem de Bruijn 			}
2457357b40a1SHerbert Xu 
2458357b40a1SHerbert Xu 			if ((len -= copy) == 0)
2459357b40a1SHerbert Xu 				return 0;
2460357b40a1SHerbert Xu 			offset += copy;
2461357b40a1SHerbert Xu 			from += copy;
2462357b40a1SHerbert Xu 		}
24631a028e50SDavid S. Miller 		start = end;
2464357b40a1SHerbert Xu 	}
2465357b40a1SHerbert Xu 
2466fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
24671a028e50SDavid S. Miller 		int end;
2468357b40a1SHerbert Xu 
2469547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
24701a028e50SDavid S. Miller 
2471fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
2472357b40a1SHerbert Xu 		if ((copy = end - offset) > 0) {
2473357b40a1SHerbert Xu 			if (copy > len)
2474357b40a1SHerbert Xu 				copy = len;
2475fbb398a8SDavid S. Miller 			if (skb_store_bits(frag_iter, offset - start,
24761a028e50SDavid S. Miller 					   from, copy))
2477357b40a1SHerbert Xu 				goto fault;
2478357b40a1SHerbert Xu 			if ((len -= copy) == 0)
2479357b40a1SHerbert Xu 				return 0;
2480357b40a1SHerbert Xu 			offset += copy;
2481357b40a1SHerbert Xu 			from += copy;
2482357b40a1SHerbert Xu 		}
24831a028e50SDavid S. Miller 		start = end;
2484357b40a1SHerbert Xu 	}
2485357b40a1SHerbert Xu 	if (!len)
2486357b40a1SHerbert Xu 		return 0;
2487357b40a1SHerbert Xu 
2488357b40a1SHerbert Xu fault:
2489357b40a1SHerbert Xu 	return -EFAULT;
2490357b40a1SHerbert Xu }
2491357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits);
2492357b40a1SHerbert Xu 
24931da177e4SLinus Torvalds /* Checksum skb data. */
24942817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
24952817a336SDaniel Borkmann 		      __wsum csum, const struct skb_checksum_ops *ops)
24961da177e4SLinus Torvalds {
24971a028e50SDavid S. Miller 	int start = skb_headlen(skb);
24981a028e50SDavid S. Miller 	int i, copy = start - offset;
2499fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
25001da177e4SLinus Torvalds 	int pos = 0;
25011da177e4SLinus Torvalds 
25021da177e4SLinus Torvalds 	/* Checksum header. */
25031da177e4SLinus Torvalds 	if (copy > 0) {
25041da177e4SLinus Torvalds 		if (copy > len)
25051da177e4SLinus Torvalds 			copy = len;
25062817a336SDaniel Borkmann 		csum = ops->update(skb->data + offset, copy, csum);
25071da177e4SLinus Torvalds 		if ((len -= copy) == 0)
25081da177e4SLinus Torvalds 			return csum;
25091da177e4SLinus Torvalds 		offset += copy;
25101da177e4SLinus Torvalds 		pos	= copy;
25111da177e4SLinus Torvalds 	}
25121da177e4SLinus Torvalds 
25131da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
25141a028e50SDavid S. Miller 		int end;
251551c56b00SEric Dumazet 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
25161da177e4SLinus Torvalds 
2517547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
25181a028e50SDavid S. Miller 
251951c56b00SEric Dumazet 		end = start + skb_frag_size(frag);
25201da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2521c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2522c613c209SWillem de Bruijn 			struct page *p;
252344bb9363SAl Viro 			__wsum csum2;
25241da177e4SLinus Torvalds 			u8 *vaddr;
25251da177e4SLinus Torvalds 
25261da177e4SLinus Torvalds 			if (copy > len)
25271da177e4SLinus Torvalds 				copy = len;
2528c613c209SWillem de Bruijn 
2529c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2530c613c209SWillem de Bruijn 					      frag->page_offset + offset - start,
2531c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2532c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2533c613c209SWillem de Bruijn 				csum2 = ops->update(vaddr + p_off, p_len, 0);
253451c56b00SEric Dumazet 				kunmap_atomic(vaddr);
2535c613c209SWillem de Bruijn 				csum = ops->combine(csum, csum2, pos, p_len);
2536c613c209SWillem de Bruijn 				pos += p_len;
2537c613c209SWillem de Bruijn 			}
2538c613c209SWillem de Bruijn 
25391da177e4SLinus Torvalds 			if (!(len -= copy))
25401da177e4SLinus Torvalds 				return csum;
25411da177e4SLinus Torvalds 			offset += copy;
25421da177e4SLinus Torvalds 		}
25431a028e50SDavid S. Miller 		start = end;
25441da177e4SLinus Torvalds 	}
25451da177e4SLinus Torvalds 
2546fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
25471a028e50SDavid S. Miller 		int end;
25481da177e4SLinus Torvalds 
2549547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
25501a028e50SDavid S. Miller 
2551fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
25521da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
25535f92a738SAl Viro 			__wsum csum2;
25541da177e4SLinus Torvalds 			if (copy > len)
25551da177e4SLinus Torvalds 				copy = len;
25562817a336SDaniel Borkmann 			csum2 = __skb_checksum(frag_iter, offset - start,
25572817a336SDaniel Borkmann 					       copy, 0, ops);
25582817a336SDaniel Borkmann 			csum = ops->combine(csum, csum2, pos, copy);
25591da177e4SLinus Torvalds 			if ((len -= copy) == 0)
25601da177e4SLinus Torvalds 				return csum;
25611da177e4SLinus Torvalds 			offset += copy;
25621da177e4SLinus Torvalds 			pos    += copy;
25631da177e4SLinus Torvalds 		}
25641a028e50SDavid S. Miller 		start = end;
25651da177e4SLinus Torvalds 	}
256609a62660SKris Katterjohn 	BUG_ON(len);
25671da177e4SLinus Torvalds 
25681da177e4SLinus Torvalds 	return csum;
25691da177e4SLinus Torvalds }
25702817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum);
25712817a336SDaniel Borkmann 
25722817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset,
25732817a336SDaniel Borkmann 		    int len, __wsum csum)
25742817a336SDaniel Borkmann {
25752817a336SDaniel Borkmann 	const struct skb_checksum_ops ops = {
2576cea80ea8SDaniel Borkmann 		.update  = csum_partial_ext,
25772817a336SDaniel Borkmann 		.combine = csum_block_add_ext,
25782817a336SDaniel Borkmann 	};
25792817a336SDaniel Borkmann 
25802817a336SDaniel Borkmann 	return __skb_checksum(skb, offset, len, csum, &ops);
25812817a336SDaniel Borkmann }
2582b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum);
25831da177e4SLinus Torvalds 
25841da177e4SLinus Torvalds /* Both of above in one bottle. */
25851da177e4SLinus Torvalds 
258681d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
258781d77662SAl Viro 				    u8 *to, int len, __wsum csum)
25881da177e4SLinus Torvalds {
25891a028e50SDavid S. Miller 	int start = skb_headlen(skb);
25901a028e50SDavid S. Miller 	int i, copy = start - offset;
2591fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
25921da177e4SLinus Torvalds 	int pos = 0;
25931da177e4SLinus Torvalds 
25941da177e4SLinus Torvalds 	/* Copy header. */
25951da177e4SLinus Torvalds 	if (copy > 0) {
25961da177e4SLinus Torvalds 		if (copy > len)
25971da177e4SLinus Torvalds 			copy = len;
25981da177e4SLinus Torvalds 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
25991da177e4SLinus Torvalds 						 copy, csum);
26001da177e4SLinus Torvalds 		if ((len -= copy) == 0)
26011da177e4SLinus Torvalds 			return csum;
26021da177e4SLinus Torvalds 		offset += copy;
26031da177e4SLinus Torvalds 		to     += copy;
26041da177e4SLinus Torvalds 		pos	= copy;
26051da177e4SLinus Torvalds 	}
26061da177e4SLinus Torvalds 
26071da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
26081a028e50SDavid S. Miller 		int end;
26091da177e4SLinus Torvalds 
2610547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
26111a028e50SDavid S. Miller 
26129e903e08SEric Dumazet 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
26131da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2614c613c209SWillem de Bruijn 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2615c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2616c613c209SWillem de Bruijn 			struct page *p;
26175084205fSAl Viro 			__wsum csum2;
26181da177e4SLinus Torvalds 			u8 *vaddr;
26191da177e4SLinus Torvalds 
26201da177e4SLinus Torvalds 			if (copy > len)
26211da177e4SLinus Torvalds 				copy = len;
2622c613c209SWillem de Bruijn 
2623c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2624c613c209SWillem de Bruijn 					      frag->page_offset + offset - start,
2625c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2626c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2627c613c209SWillem de Bruijn 				csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2628c613c209SWillem de Bruijn 								  to + copied,
2629c613c209SWillem de Bruijn 								  p_len, 0);
263051c56b00SEric Dumazet 				kunmap_atomic(vaddr);
26311da177e4SLinus Torvalds 				csum = csum_block_add(csum, csum2, pos);
2632c613c209SWillem de Bruijn 				pos += p_len;
2633c613c209SWillem de Bruijn 			}
2634c613c209SWillem de Bruijn 
26351da177e4SLinus Torvalds 			if (!(len -= copy))
26361da177e4SLinus Torvalds 				return csum;
26371da177e4SLinus Torvalds 			offset += copy;
26381da177e4SLinus Torvalds 			to     += copy;
26391da177e4SLinus Torvalds 		}
26401a028e50SDavid S. Miller 		start = end;
26411da177e4SLinus Torvalds 	}
26421da177e4SLinus Torvalds 
2643fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
264481d77662SAl Viro 		__wsum csum2;
26451a028e50SDavid S. Miller 		int end;
26461da177e4SLinus Torvalds 
2647547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
26481a028e50SDavid S. Miller 
2649fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
26501da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
26511da177e4SLinus Torvalds 			if (copy > len)
26521da177e4SLinus Torvalds 				copy = len;
2653fbb398a8SDavid S. Miller 			csum2 = skb_copy_and_csum_bits(frag_iter,
26541a028e50SDavid S. Miller 						       offset - start,
26551da177e4SLinus Torvalds 						       to, copy, 0);
26561da177e4SLinus Torvalds 			csum = csum_block_add(csum, csum2, pos);
26571da177e4SLinus Torvalds 			if ((len -= copy) == 0)
26581da177e4SLinus Torvalds 				return csum;
26591da177e4SLinus Torvalds 			offset += copy;
26601da177e4SLinus Torvalds 			to     += copy;
26611da177e4SLinus Torvalds 			pos    += copy;
26621da177e4SLinus Torvalds 		}
26631a028e50SDavid S. Miller 		start = end;
26641da177e4SLinus Torvalds 	}
266509a62660SKris Katterjohn 	BUG_ON(len);
26661da177e4SLinus Torvalds 	return csum;
26671da177e4SLinus Torvalds }
2668b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_bits);
26691da177e4SLinus Torvalds 
267049f8e832SCong Wang __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
267149f8e832SCong Wang {
267249f8e832SCong Wang 	__sum16 sum;
267349f8e832SCong Wang 
267449f8e832SCong Wang 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
267514641931SCong Wang 	/* See comments in __skb_checksum_complete(). */
267649f8e832SCong Wang 	if (likely(!sum)) {
267749f8e832SCong Wang 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
267849f8e832SCong Wang 		    !skb->csum_complete_sw)
26797fe50ac8SCong Wang 			netdev_rx_csum_fault(skb->dev, skb);
268049f8e832SCong Wang 	}
268149f8e832SCong Wang 	if (!skb_shared(skb))
268249f8e832SCong Wang 		skb->csum_valid = !sum;
268349f8e832SCong Wang 	return sum;
268449f8e832SCong Wang }
268549f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete_head);
268649f8e832SCong Wang 
268714641931SCong Wang /* This function assumes skb->csum already holds pseudo header's checksum,
268814641931SCong Wang  * which has been changed from the hardware checksum, for example, by
268914641931SCong Wang  * __skb_checksum_validate_complete(). And, the original skb->csum must
269014641931SCong Wang  * have been validated unsuccessfully for CHECKSUM_COMPLETE case.
269114641931SCong Wang  *
269214641931SCong Wang  * It returns non-zero if the recomputed checksum is still invalid, otherwise
269314641931SCong Wang  * zero. The new checksum is stored back into skb->csum unless the skb is
269414641931SCong Wang  * shared.
269514641931SCong Wang  */
269649f8e832SCong Wang __sum16 __skb_checksum_complete(struct sk_buff *skb)
269749f8e832SCong Wang {
269849f8e832SCong Wang 	__wsum csum;
269949f8e832SCong Wang 	__sum16 sum;
270049f8e832SCong Wang 
270149f8e832SCong Wang 	csum = skb_checksum(skb, 0, skb->len, 0);
270249f8e832SCong Wang 
270349f8e832SCong Wang 	sum = csum_fold(csum_add(skb->csum, csum));
270414641931SCong Wang 	/* This check is inverted, because we already knew the hardware
270514641931SCong Wang 	 * checksum is invalid before calling this function. So, if the
270614641931SCong Wang 	 * re-computed checksum is valid instead, then we have a mismatch
270714641931SCong Wang 	 * between the original skb->csum and skb_checksum(). This means either
270814641931SCong Wang 	 * the original hardware checksum is incorrect or we screw up skb->csum
270914641931SCong Wang 	 * when moving skb->data around.
271014641931SCong Wang 	 */
271149f8e832SCong Wang 	if (likely(!sum)) {
271249f8e832SCong Wang 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
271349f8e832SCong Wang 		    !skb->csum_complete_sw)
27147fe50ac8SCong Wang 			netdev_rx_csum_fault(skb->dev, skb);
271549f8e832SCong Wang 	}
271649f8e832SCong Wang 
271749f8e832SCong Wang 	if (!skb_shared(skb)) {
271849f8e832SCong Wang 		/* Save full packet checksum */
271949f8e832SCong Wang 		skb->csum = csum;
272049f8e832SCong Wang 		skb->ip_summed = CHECKSUM_COMPLETE;
272149f8e832SCong Wang 		skb->csum_complete_sw = 1;
272249f8e832SCong Wang 		skb->csum_valid = !sum;
272349f8e832SCong Wang 	}
272449f8e832SCong Wang 
272549f8e832SCong Wang 	return sum;
272649f8e832SCong Wang }
272749f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete);
272849f8e832SCong Wang 
27299617813dSDavide Caratti static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
27309617813dSDavide Caratti {
27319617813dSDavide Caratti 	net_warn_ratelimited(
27329617813dSDavide Caratti 		"%s: attempt to compute crc32c without libcrc32c.ko\n",
27339617813dSDavide Caratti 		__func__);
27349617813dSDavide Caratti 	return 0;
27359617813dSDavide Caratti }
27369617813dSDavide Caratti 
27379617813dSDavide Caratti static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
27389617813dSDavide Caratti 				       int offset, int len)
27399617813dSDavide Caratti {
27409617813dSDavide Caratti 	net_warn_ratelimited(
27419617813dSDavide Caratti 		"%s: attempt to compute crc32c without libcrc32c.ko\n",
27429617813dSDavide Caratti 		__func__);
27439617813dSDavide Caratti 	return 0;
27449617813dSDavide Caratti }
27459617813dSDavide Caratti 
27469617813dSDavide Caratti static const struct skb_checksum_ops default_crc32c_ops = {
27479617813dSDavide Caratti 	.update  = warn_crc32c_csum_update,
27489617813dSDavide Caratti 	.combine = warn_crc32c_csum_combine,
27499617813dSDavide Caratti };
27509617813dSDavide Caratti 
27519617813dSDavide Caratti const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
27529617813dSDavide Caratti 	&default_crc32c_ops;
27539617813dSDavide Caratti EXPORT_SYMBOL(crc32c_csum_stub);
27549617813dSDavide Caratti 
2755af2806f8SThomas Graf  /**
2756af2806f8SThomas Graf  *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2757af2806f8SThomas Graf  *	@from: source buffer
2758af2806f8SThomas Graf  *
2759af2806f8SThomas Graf  *	Calculates the amount of linear headroom needed in the 'to' skb passed
2760af2806f8SThomas Graf  *	into skb_zerocopy().
2761af2806f8SThomas Graf  */
2762af2806f8SThomas Graf unsigned int
2763af2806f8SThomas Graf skb_zerocopy_headlen(const struct sk_buff *from)
2764af2806f8SThomas Graf {
2765af2806f8SThomas Graf 	unsigned int hlen = 0;
2766af2806f8SThomas Graf 
2767af2806f8SThomas Graf 	if (!from->head_frag ||
2768af2806f8SThomas Graf 	    skb_headlen(from) < L1_CACHE_BYTES ||
2769af2806f8SThomas Graf 	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2770af2806f8SThomas Graf 		hlen = skb_headlen(from);
2771af2806f8SThomas Graf 
2772af2806f8SThomas Graf 	if (skb_has_frag_list(from))
2773af2806f8SThomas Graf 		hlen = from->len;
2774af2806f8SThomas Graf 
2775af2806f8SThomas Graf 	return hlen;
2776af2806f8SThomas Graf }
2777af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2778af2806f8SThomas Graf 
2779af2806f8SThomas Graf /**
2780af2806f8SThomas Graf  *	skb_zerocopy - Zero copy skb to skb
2781af2806f8SThomas Graf  *	@to: destination buffer
27827fceb4deSMasanari Iida  *	@from: source buffer
2783af2806f8SThomas Graf  *	@len: number of bytes to copy from source buffer
2784af2806f8SThomas Graf  *	@hlen: size of linear headroom in destination buffer
2785af2806f8SThomas Graf  *
2786af2806f8SThomas Graf  *	Copies up to `len` bytes from `from` to `to` by creating references
2787af2806f8SThomas Graf  *	to the frags in the source buffer.
2788af2806f8SThomas Graf  *
2789af2806f8SThomas Graf  *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2790af2806f8SThomas Graf  *	headroom in the `to` buffer.
279136d5fe6aSZoltan Kiss  *
279236d5fe6aSZoltan Kiss  *	Return value:
279336d5fe6aSZoltan Kiss  *	0: everything is OK
279436d5fe6aSZoltan Kiss  *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
279536d5fe6aSZoltan Kiss  *	-EFAULT: skb_copy_bits() found some problem with skb geometry
2796af2806f8SThomas Graf  */
279736d5fe6aSZoltan Kiss int
279836d5fe6aSZoltan Kiss skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2799af2806f8SThomas Graf {
2800af2806f8SThomas Graf 	int i, j = 0;
2801af2806f8SThomas Graf 	int plen = 0; /* length of skb->head fragment */
280236d5fe6aSZoltan Kiss 	int ret;
2803af2806f8SThomas Graf 	struct page *page;
2804af2806f8SThomas Graf 	unsigned int offset;
2805af2806f8SThomas Graf 
2806af2806f8SThomas Graf 	BUG_ON(!from->head_frag && !hlen);
2807af2806f8SThomas Graf 
2808af2806f8SThomas Graf 	/* dont bother with small payloads */
280936d5fe6aSZoltan Kiss 	if (len <= skb_tailroom(to))
281036d5fe6aSZoltan Kiss 		return skb_copy_bits(from, 0, skb_put(to, len), len);
2811af2806f8SThomas Graf 
2812af2806f8SThomas Graf 	if (hlen) {
281336d5fe6aSZoltan Kiss 		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
281436d5fe6aSZoltan Kiss 		if (unlikely(ret))
281536d5fe6aSZoltan Kiss 			return ret;
2816af2806f8SThomas Graf 		len -= hlen;
2817af2806f8SThomas Graf 	} else {
2818af2806f8SThomas Graf 		plen = min_t(int, skb_headlen(from), len);
2819af2806f8SThomas Graf 		if (plen) {
2820af2806f8SThomas Graf 			page = virt_to_head_page(from->head);
2821af2806f8SThomas Graf 			offset = from->data - (unsigned char *)page_address(page);
2822af2806f8SThomas Graf 			__skb_fill_page_desc(to, 0, page, offset, plen);
2823af2806f8SThomas Graf 			get_page(page);
2824af2806f8SThomas Graf 			j = 1;
2825af2806f8SThomas Graf 			len -= plen;
2826af2806f8SThomas Graf 		}
2827af2806f8SThomas Graf 	}
2828af2806f8SThomas Graf 
2829af2806f8SThomas Graf 	to->truesize += len + plen;
2830af2806f8SThomas Graf 	to->len += len + plen;
2831af2806f8SThomas Graf 	to->data_len += len + plen;
2832af2806f8SThomas Graf 
283336d5fe6aSZoltan Kiss 	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
283436d5fe6aSZoltan Kiss 		skb_tx_error(from);
283536d5fe6aSZoltan Kiss 		return -ENOMEM;
283636d5fe6aSZoltan Kiss 	}
28371f8b977aSWillem de Bruijn 	skb_zerocopy_clone(to, from, GFP_ATOMIC);
283836d5fe6aSZoltan Kiss 
2839af2806f8SThomas Graf 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2840af2806f8SThomas Graf 		if (!len)
2841af2806f8SThomas Graf 			break;
2842af2806f8SThomas Graf 		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2843af2806f8SThomas Graf 		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2844af2806f8SThomas Graf 		len -= skb_shinfo(to)->frags[j].size;
2845af2806f8SThomas Graf 		skb_frag_ref(to, j);
2846af2806f8SThomas Graf 		j++;
2847af2806f8SThomas Graf 	}
2848af2806f8SThomas Graf 	skb_shinfo(to)->nr_frags = j;
284936d5fe6aSZoltan Kiss 
285036d5fe6aSZoltan Kiss 	return 0;
2851af2806f8SThomas Graf }
2852af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy);
2853af2806f8SThomas Graf 
28541da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
28551da177e4SLinus Torvalds {
2856d3bc23e7SAl Viro 	__wsum csum;
28571da177e4SLinus Torvalds 	long csstart;
28581da177e4SLinus Torvalds 
285984fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL)
286055508d60SMichał Mirosław 		csstart = skb_checksum_start_offset(skb);
28611da177e4SLinus Torvalds 	else
28621da177e4SLinus Torvalds 		csstart = skb_headlen(skb);
28631da177e4SLinus Torvalds 
286409a62660SKris Katterjohn 	BUG_ON(csstart > skb_headlen(skb));
28651da177e4SLinus Torvalds 
2866d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data(skb, to, csstart);
28671da177e4SLinus Torvalds 
28681da177e4SLinus Torvalds 	csum = 0;
28691da177e4SLinus Torvalds 	if (csstart != skb->len)
28701da177e4SLinus Torvalds 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
28711da177e4SLinus Torvalds 					      skb->len - csstart, 0);
28721da177e4SLinus Torvalds 
287384fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2874ff1dcadbSAl Viro 		long csstuff = csstart + skb->csum_offset;
28751da177e4SLinus Torvalds 
2876d3bc23e7SAl Viro 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
28771da177e4SLinus Torvalds 	}
28781da177e4SLinus Torvalds }
2879b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev);
28801da177e4SLinus Torvalds 
28811da177e4SLinus Torvalds /**
28821da177e4SLinus Torvalds  *	skb_dequeue - remove from the head of the queue
28831da177e4SLinus Torvalds  *	@list: list to dequeue from
28841da177e4SLinus Torvalds  *
28851da177e4SLinus Torvalds  *	Remove the head of the list. The list lock is taken so the function
28861da177e4SLinus Torvalds  *	may be used safely with other locking list functions. The head item is
28871da177e4SLinus Torvalds  *	returned or %NULL if the list is empty.
28881da177e4SLinus Torvalds  */
28891da177e4SLinus Torvalds 
28901da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list)
28911da177e4SLinus Torvalds {
28921da177e4SLinus Torvalds 	unsigned long flags;
28931da177e4SLinus Torvalds 	struct sk_buff *result;
28941da177e4SLinus Torvalds 
28951da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
28961da177e4SLinus Torvalds 	result = __skb_dequeue(list);
28971da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
28981da177e4SLinus Torvalds 	return result;
28991da177e4SLinus Torvalds }
2900b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue);
29011da177e4SLinus Torvalds 
29021da177e4SLinus Torvalds /**
29031da177e4SLinus Torvalds  *	skb_dequeue_tail - remove from the tail of the queue
29041da177e4SLinus Torvalds  *	@list: list to dequeue from
29051da177e4SLinus Torvalds  *
29061da177e4SLinus Torvalds  *	Remove the tail of the list. The list lock is taken so the function
29071da177e4SLinus Torvalds  *	may be used safely with other locking list functions. The tail item is
29081da177e4SLinus Torvalds  *	returned or %NULL if the list is empty.
29091da177e4SLinus Torvalds  */
29101da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
29111da177e4SLinus Torvalds {
29121da177e4SLinus Torvalds 	unsigned long flags;
29131da177e4SLinus Torvalds 	struct sk_buff *result;
29141da177e4SLinus Torvalds 
29151da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
29161da177e4SLinus Torvalds 	result = __skb_dequeue_tail(list);
29171da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
29181da177e4SLinus Torvalds 	return result;
29191da177e4SLinus Torvalds }
2920b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail);
29211da177e4SLinus Torvalds 
29221da177e4SLinus Torvalds /**
29231da177e4SLinus Torvalds  *	skb_queue_purge - empty a list
29241da177e4SLinus Torvalds  *	@list: list to empty
29251da177e4SLinus Torvalds  *
29261da177e4SLinus Torvalds  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
29271da177e4SLinus Torvalds  *	the list and one reference dropped. This function takes the list
29281da177e4SLinus Torvalds  *	lock and is atomic with respect to other list locking functions.
29291da177e4SLinus Torvalds  */
29301da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list)
29311da177e4SLinus Torvalds {
29321da177e4SLinus Torvalds 	struct sk_buff *skb;
29331da177e4SLinus Torvalds 	while ((skb = skb_dequeue(list)) != NULL)
29341da177e4SLinus Torvalds 		kfree_skb(skb);
29351da177e4SLinus Torvalds }
2936b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge);
29371da177e4SLinus Torvalds 
29381da177e4SLinus Torvalds /**
29399f5afeaeSYaogong Wang  *	skb_rbtree_purge - empty a skb rbtree
29409f5afeaeSYaogong Wang  *	@root: root of the rbtree to empty
2941385114deSPeter Oskolkov  *	Return value: the sum of truesizes of all purged skbs.
29429f5afeaeSYaogong Wang  *
29439f5afeaeSYaogong Wang  *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
29449f5afeaeSYaogong Wang  *	the list and one reference dropped. This function does not take
29459f5afeaeSYaogong Wang  *	any lock. Synchronization should be handled by the caller (e.g., TCP
29469f5afeaeSYaogong Wang  *	out-of-order queue is protected by the socket lock).
29479f5afeaeSYaogong Wang  */
2948385114deSPeter Oskolkov unsigned int skb_rbtree_purge(struct rb_root *root)
29499f5afeaeSYaogong Wang {
29507c90584cSEric Dumazet 	struct rb_node *p = rb_first(root);
2951385114deSPeter Oskolkov 	unsigned int sum = 0;
29529f5afeaeSYaogong Wang 
29537c90584cSEric Dumazet 	while (p) {
29547c90584cSEric Dumazet 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
29557c90584cSEric Dumazet 
29567c90584cSEric Dumazet 		p = rb_next(p);
29577c90584cSEric Dumazet 		rb_erase(&skb->rbnode, root);
2958385114deSPeter Oskolkov 		sum += skb->truesize;
29599f5afeaeSYaogong Wang 		kfree_skb(skb);
29607c90584cSEric Dumazet 	}
2961385114deSPeter Oskolkov 	return sum;
29629f5afeaeSYaogong Wang }
29639f5afeaeSYaogong Wang 
29649f5afeaeSYaogong Wang /**
29651da177e4SLinus Torvalds  *	skb_queue_head - queue a buffer at the list head
29661da177e4SLinus Torvalds  *	@list: list to use
29671da177e4SLinus Torvalds  *	@newsk: buffer to queue
29681da177e4SLinus Torvalds  *
29691da177e4SLinus Torvalds  *	Queue a buffer at the start of the list. This function takes the
29701da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff functions
29711da177e4SLinus Torvalds  *	safely.
29721da177e4SLinus Torvalds  *
29731da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
29741da177e4SLinus Torvalds  */
29751da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
29761da177e4SLinus Torvalds {
29771da177e4SLinus Torvalds 	unsigned long flags;
29781da177e4SLinus Torvalds 
29791da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
29801da177e4SLinus Torvalds 	__skb_queue_head(list, newsk);
29811da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
29821da177e4SLinus Torvalds }
2983b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head);
29841da177e4SLinus Torvalds 
29851da177e4SLinus Torvalds /**
29861da177e4SLinus Torvalds  *	skb_queue_tail - queue a buffer at the list tail
29871da177e4SLinus Torvalds  *	@list: list to use
29881da177e4SLinus Torvalds  *	@newsk: buffer to queue
29891da177e4SLinus Torvalds  *
29901da177e4SLinus Torvalds  *	Queue a buffer at the tail of the list. This function takes the
29911da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff functions
29921da177e4SLinus Torvalds  *	safely.
29931da177e4SLinus Torvalds  *
29941da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
29951da177e4SLinus Torvalds  */
29961da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
29971da177e4SLinus Torvalds {
29981da177e4SLinus Torvalds 	unsigned long flags;
29991da177e4SLinus Torvalds 
30001da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
30011da177e4SLinus Torvalds 	__skb_queue_tail(list, newsk);
30021da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
30031da177e4SLinus Torvalds }
3004b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail);
30058728b834SDavid S. Miller 
30061da177e4SLinus Torvalds /**
30071da177e4SLinus Torvalds  *	skb_unlink	-	remove a buffer from a list
30081da177e4SLinus Torvalds  *	@skb: buffer to remove
30098728b834SDavid S. Miller  *	@list: list to use
30101da177e4SLinus Torvalds  *
30118728b834SDavid S. Miller  *	Remove a packet from a list. The list locks are taken and this
30128728b834SDavid S. Miller  *	function is atomic with respect to other list locked calls
30131da177e4SLinus Torvalds  *
30148728b834SDavid S. Miller  *	You must know what list the SKB is on.
30151da177e4SLinus Torvalds  */
30168728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
30171da177e4SLinus Torvalds {
30181da177e4SLinus Torvalds 	unsigned long flags;
30191da177e4SLinus Torvalds 
30201da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
30218728b834SDavid S. Miller 	__skb_unlink(skb, list);
30221da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
30231da177e4SLinus Torvalds }
3024b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink);
30251da177e4SLinus Torvalds 
30261da177e4SLinus Torvalds /**
30271da177e4SLinus Torvalds  *	skb_append	-	append a buffer
30281da177e4SLinus Torvalds  *	@old: buffer to insert after
30291da177e4SLinus Torvalds  *	@newsk: buffer to insert
30308728b834SDavid S. Miller  *	@list: list to use
30311da177e4SLinus Torvalds  *
30321da177e4SLinus Torvalds  *	Place a packet after a given packet in a list. The list locks are taken
30331da177e4SLinus Torvalds  *	and this function is atomic with respect to other list locked calls.
30341da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
30351da177e4SLinus Torvalds  */
30368728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
30371da177e4SLinus Torvalds {
30381da177e4SLinus Torvalds 	unsigned long flags;
30391da177e4SLinus Torvalds 
30408728b834SDavid S. Miller 	spin_lock_irqsave(&list->lock, flags);
30417de6c033SGerrit Renker 	__skb_queue_after(list, old, newsk);
30428728b834SDavid S. Miller 	spin_unlock_irqrestore(&list->lock, flags);
30431da177e4SLinus Torvalds }
3044b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append);
30451da177e4SLinus Torvalds 
30461da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb,
30471da177e4SLinus Torvalds 					   struct sk_buff* skb1,
30481da177e4SLinus Torvalds 					   const u32 len, const int pos)
30491da177e4SLinus Torvalds {
30501da177e4SLinus Torvalds 	int i;
30511da177e4SLinus Torvalds 
3052d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3053d626f62bSArnaldo Carvalho de Melo 					 pos - len);
30541da177e4SLinus Torvalds 	/* And move data appendix as is. */
30551da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
30561da177e4SLinus Torvalds 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
30571da177e4SLinus Torvalds 
30581da177e4SLinus Torvalds 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
30591da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags  = 0;
30601da177e4SLinus Torvalds 	skb1->data_len		   = skb->data_len;
30611da177e4SLinus Torvalds 	skb1->len		   += skb1->data_len;
30621da177e4SLinus Torvalds 	skb->data_len		   = 0;
30631da177e4SLinus Torvalds 	skb->len		   = len;
306427a884dcSArnaldo Carvalho de Melo 	skb_set_tail_pointer(skb, len);
30651da177e4SLinus Torvalds }
30661da177e4SLinus Torvalds 
30671da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb,
30681da177e4SLinus Torvalds 				       struct sk_buff* skb1,
30691da177e4SLinus Torvalds 				       const u32 len, int pos)
30701da177e4SLinus Torvalds {
30711da177e4SLinus Torvalds 	int i, k = 0;
30721da177e4SLinus Torvalds 	const int nfrags = skb_shinfo(skb)->nr_frags;
30731da177e4SLinus Torvalds 
30741da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = 0;
30751da177e4SLinus Torvalds 	skb1->len		  = skb1->data_len = skb->len - len;
30761da177e4SLinus Torvalds 	skb->len		  = len;
30771da177e4SLinus Torvalds 	skb->data_len		  = len - pos;
30781da177e4SLinus Torvalds 
30791da177e4SLinus Torvalds 	for (i = 0; i < nfrags; i++) {
30809e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
30811da177e4SLinus Torvalds 
30821da177e4SLinus Torvalds 		if (pos + size > len) {
30831da177e4SLinus Torvalds 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
30841da177e4SLinus Torvalds 
30851da177e4SLinus Torvalds 			if (pos < len) {
30861da177e4SLinus Torvalds 				/* Split frag.
30871da177e4SLinus Torvalds 				 * We have two variants in this case:
30881da177e4SLinus Torvalds 				 * 1. Move all the frag to the second
30891da177e4SLinus Torvalds 				 *    part, if it is possible. F.e.
30901da177e4SLinus Torvalds 				 *    this approach is mandatory for TUX,
30911da177e4SLinus Torvalds 				 *    where splitting is expensive.
30921da177e4SLinus Torvalds 				 * 2. Split is accurately. We make this.
30931da177e4SLinus Torvalds 				 */
3094ea2ab693SIan Campbell 				skb_frag_ref(skb, i);
30951da177e4SLinus Torvalds 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
30969e903e08SEric Dumazet 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
30979e903e08SEric Dumazet 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
30981da177e4SLinus Torvalds 				skb_shinfo(skb)->nr_frags++;
30991da177e4SLinus Torvalds 			}
31001da177e4SLinus Torvalds 			k++;
31011da177e4SLinus Torvalds 		} else
31021da177e4SLinus Torvalds 			skb_shinfo(skb)->nr_frags++;
31031da177e4SLinus Torvalds 		pos += size;
31041da177e4SLinus Torvalds 	}
31051da177e4SLinus Torvalds 	skb_shinfo(skb1)->nr_frags = k;
31061da177e4SLinus Torvalds }
31071da177e4SLinus Torvalds 
31081da177e4SLinus Torvalds /**
31091da177e4SLinus Torvalds  * skb_split - Split fragmented skb to two parts at length len.
31101da177e4SLinus Torvalds  * @skb: the buffer to split
31111da177e4SLinus Torvalds  * @skb1: the buffer to receive the second part
31121da177e4SLinus Torvalds  * @len: new length for skb
31131da177e4SLinus Torvalds  */
31141da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
31151da177e4SLinus Torvalds {
31161da177e4SLinus Torvalds 	int pos = skb_headlen(skb);
31171da177e4SLinus Torvalds 
3118fff88030SWillem de Bruijn 	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3119fff88030SWillem de Bruijn 				      SKBTX_SHARED_FRAG;
31201f8b977aSWillem de Bruijn 	skb_zerocopy_clone(skb1, skb, 0);
31211da177e4SLinus Torvalds 	if (len < pos)	/* Split line is inside header. */
31221da177e4SLinus Torvalds 		skb_split_inside_header(skb, skb1, len, pos);
31231da177e4SLinus Torvalds 	else		/* Second chunk has no header, nothing to copy. */
31241da177e4SLinus Torvalds 		skb_split_no_header(skb, skb1, len, pos);
31251da177e4SLinus Torvalds }
3126b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split);
31271da177e4SLinus Torvalds 
31289f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go.
31299f782db3SIlpo Järvinen  *
31309f782db3SIlpo Järvinen  * Caller cannot keep skb_shinfo related pointers past calling here!
31319f782db3SIlpo Järvinen  */
3132832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb)
3133832d11c5SIlpo Järvinen {
31340ace2856SIlpo Järvinen 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3135832d11c5SIlpo Järvinen }
3136832d11c5SIlpo Järvinen 
3137832d11c5SIlpo Järvinen /**
3138832d11c5SIlpo Järvinen  * skb_shift - Shifts paged data partially from skb to another
3139832d11c5SIlpo Järvinen  * @tgt: buffer into which tail data gets added
3140832d11c5SIlpo Järvinen  * @skb: buffer from which the paged data comes from
3141832d11c5SIlpo Järvinen  * @shiftlen: shift up to this many bytes
3142832d11c5SIlpo Järvinen  *
3143832d11c5SIlpo Järvinen  * Attempts to shift up to shiftlen worth of bytes, which may be less than
314420e994a0SFeng King  * the length of the skb, from skb to tgt. Returns number bytes shifted.
3145832d11c5SIlpo Järvinen  * It's up to caller to free skb if everything was shifted.
3146832d11c5SIlpo Järvinen  *
3147832d11c5SIlpo Järvinen  * If @tgt runs out of frags, the whole operation is aborted.
3148832d11c5SIlpo Järvinen  *
3149832d11c5SIlpo Järvinen  * Skb cannot include anything else but paged data while tgt is allowed
3150832d11c5SIlpo Järvinen  * to have non-paged data as well.
3151832d11c5SIlpo Järvinen  *
3152832d11c5SIlpo Järvinen  * TODO: full sized shift could be optimized but that would need
3153832d11c5SIlpo Järvinen  * specialized skb free'er to handle frags without up-to-date nr_frags.
3154832d11c5SIlpo Järvinen  */
3155832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3156832d11c5SIlpo Järvinen {
3157832d11c5SIlpo Järvinen 	int from, to, merge, todo;
3158832d11c5SIlpo Järvinen 	struct skb_frag_struct *fragfrom, *fragto;
3159832d11c5SIlpo Järvinen 
3160832d11c5SIlpo Järvinen 	BUG_ON(shiftlen > skb->len);
3161f8071cdeSEric Dumazet 
3162f8071cdeSEric Dumazet 	if (skb_headlen(skb))
3163f8071cdeSEric Dumazet 		return 0;
31641f8b977aSWillem de Bruijn 	if (skb_zcopy(tgt) || skb_zcopy(skb))
31651f8b977aSWillem de Bruijn 		return 0;
3166832d11c5SIlpo Järvinen 
3167832d11c5SIlpo Järvinen 	todo = shiftlen;
3168832d11c5SIlpo Järvinen 	from = 0;
3169832d11c5SIlpo Järvinen 	to = skb_shinfo(tgt)->nr_frags;
3170832d11c5SIlpo Järvinen 	fragfrom = &skb_shinfo(skb)->frags[from];
3171832d11c5SIlpo Järvinen 
3172832d11c5SIlpo Järvinen 	/* Actual merge is delayed until the point when we know we can
3173832d11c5SIlpo Järvinen 	 * commit all, so that we don't have to undo partial changes
3174832d11c5SIlpo Järvinen 	 */
3175832d11c5SIlpo Järvinen 	if (!to ||
3176ea2ab693SIan Campbell 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3177ea2ab693SIan Campbell 			      fragfrom->page_offset)) {
3178832d11c5SIlpo Järvinen 		merge = -1;
3179832d11c5SIlpo Järvinen 	} else {
3180832d11c5SIlpo Järvinen 		merge = to - 1;
3181832d11c5SIlpo Järvinen 
31829e903e08SEric Dumazet 		todo -= skb_frag_size(fragfrom);
3183832d11c5SIlpo Järvinen 		if (todo < 0) {
3184832d11c5SIlpo Järvinen 			if (skb_prepare_for_shift(skb) ||
3185832d11c5SIlpo Järvinen 			    skb_prepare_for_shift(tgt))
3186832d11c5SIlpo Järvinen 				return 0;
3187832d11c5SIlpo Järvinen 
31889f782db3SIlpo Järvinen 			/* All previous frag pointers might be stale! */
31899f782db3SIlpo Järvinen 			fragfrom = &skb_shinfo(skb)->frags[from];
3190832d11c5SIlpo Järvinen 			fragto = &skb_shinfo(tgt)->frags[merge];
3191832d11c5SIlpo Järvinen 
31929e903e08SEric Dumazet 			skb_frag_size_add(fragto, shiftlen);
31939e903e08SEric Dumazet 			skb_frag_size_sub(fragfrom, shiftlen);
3194832d11c5SIlpo Järvinen 			fragfrom->page_offset += shiftlen;
3195832d11c5SIlpo Järvinen 
3196832d11c5SIlpo Järvinen 			goto onlymerged;
3197832d11c5SIlpo Järvinen 		}
3198832d11c5SIlpo Järvinen 
3199832d11c5SIlpo Järvinen 		from++;
3200832d11c5SIlpo Järvinen 	}
3201832d11c5SIlpo Järvinen 
3202832d11c5SIlpo Järvinen 	/* Skip full, not-fitting skb to avoid expensive operations */
3203832d11c5SIlpo Järvinen 	if ((shiftlen == skb->len) &&
3204832d11c5SIlpo Järvinen 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3205832d11c5SIlpo Järvinen 		return 0;
3206832d11c5SIlpo Järvinen 
3207832d11c5SIlpo Järvinen 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3208832d11c5SIlpo Järvinen 		return 0;
3209832d11c5SIlpo Järvinen 
3210832d11c5SIlpo Järvinen 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3211832d11c5SIlpo Järvinen 		if (to == MAX_SKB_FRAGS)
3212832d11c5SIlpo Järvinen 			return 0;
3213832d11c5SIlpo Järvinen 
3214832d11c5SIlpo Järvinen 		fragfrom = &skb_shinfo(skb)->frags[from];
3215832d11c5SIlpo Järvinen 		fragto = &skb_shinfo(tgt)->frags[to];
3216832d11c5SIlpo Järvinen 
32179e903e08SEric Dumazet 		if (todo >= skb_frag_size(fragfrom)) {
3218832d11c5SIlpo Järvinen 			*fragto = *fragfrom;
32199e903e08SEric Dumazet 			todo -= skb_frag_size(fragfrom);
3220832d11c5SIlpo Järvinen 			from++;
3221832d11c5SIlpo Järvinen 			to++;
3222832d11c5SIlpo Järvinen 
3223832d11c5SIlpo Järvinen 		} else {
3224ea2ab693SIan Campbell 			__skb_frag_ref(fragfrom);
3225832d11c5SIlpo Järvinen 			fragto->page = fragfrom->page;
3226832d11c5SIlpo Järvinen 			fragto->page_offset = fragfrom->page_offset;
32279e903e08SEric Dumazet 			skb_frag_size_set(fragto, todo);
3228832d11c5SIlpo Järvinen 
3229832d11c5SIlpo Järvinen 			fragfrom->page_offset += todo;
32309e903e08SEric Dumazet 			skb_frag_size_sub(fragfrom, todo);
3231832d11c5SIlpo Järvinen 			todo = 0;
3232832d11c5SIlpo Järvinen 
3233832d11c5SIlpo Järvinen 			to++;
3234832d11c5SIlpo Järvinen 			break;
3235832d11c5SIlpo Järvinen 		}
3236832d11c5SIlpo Järvinen 	}
3237832d11c5SIlpo Järvinen 
3238832d11c5SIlpo Järvinen 	/* Ready to "commit" this state change to tgt */
3239832d11c5SIlpo Järvinen 	skb_shinfo(tgt)->nr_frags = to;
3240832d11c5SIlpo Järvinen 
3241832d11c5SIlpo Järvinen 	if (merge >= 0) {
3242832d11c5SIlpo Järvinen 		fragfrom = &skb_shinfo(skb)->frags[0];
3243832d11c5SIlpo Järvinen 		fragto = &skb_shinfo(tgt)->frags[merge];
3244832d11c5SIlpo Järvinen 
32459e903e08SEric Dumazet 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3246ea2ab693SIan Campbell 		__skb_frag_unref(fragfrom);
3247832d11c5SIlpo Järvinen 	}
3248832d11c5SIlpo Järvinen 
3249832d11c5SIlpo Järvinen 	/* Reposition in the original skb */
3250832d11c5SIlpo Järvinen 	to = 0;
3251832d11c5SIlpo Järvinen 	while (from < skb_shinfo(skb)->nr_frags)
3252832d11c5SIlpo Järvinen 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3253832d11c5SIlpo Järvinen 	skb_shinfo(skb)->nr_frags = to;
3254832d11c5SIlpo Järvinen 
3255832d11c5SIlpo Järvinen 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3256832d11c5SIlpo Järvinen 
3257832d11c5SIlpo Järvinen onlymerged:
3258832d11c5SIlpo Järvinen 	/* Most likely the tgt won't ever need its checksum anymore, skb on
3259832d11c5SIlpo Järvinen 	 * the other hand might need it if it needs to be resent
3260832d11c5SIlpo Järvinen 	 */
3261832d11c5SIlpo Järvinen 	tgt->ip_summed = CHECKSUM_PARTIAL;
3262832d11c5SIlpo Järvinen 	skb->ip_summed = CHECKSUM_PARTIAL;
3263832d11c5SIlpo Järvinen 
3264832d11c5SIlpo Järvinen 	/* Yak, is it really working this way? Some helper please? */
3265832d11c5SIlpo Järvinen 	skb->len -= shiftlen;
3266832d11c5SIlpo Järvinen 	skb->data_len -= shiftlen;
3267832d11c5SIlpo Järvinen 	skb->truesize -= shiftlen;
3268832d11c5SIlpo Järvinen 	tgt->len += shiftlen;
3269832d11c5SIlpo Järvinen 	tgt->data_len += shiftlen;
3270832d11c5SIlpo Järvinen 	tgt->truesize += shiftlen;
3271832d11c5SIlpo Järvinen 
3272832d11c5SIlpo Järvinen 	return shiftlen;
3273832d11c5SIlpo Järvinen }
3274832d11c5SIlpo Järvinen 
3275677e90edSThomas Graf /**
3276677e90edSThomas Graf  * skb_prepare_seq_read - Prepare a sequential read of skb data
3277677e90edSThomas Graf  * @skb: the buffer to read
3278677e90edSThomas Graf  * @from: lower offset of data to be read
3279677e90edSThomas Graf  * @to: upper offset of data to be read
3280677e90edSThomas Graf  * @st: state variable
3281677e90edSThomas Graf  *
3282677e90edSThomas Graf  * Initializes the specified state variable. Must be called before
3283677e90edSThomas Graf  * invoking skb_seq_read() for the first time.
3284677e90edSThomas Graf  */
3285677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3286677e90edSThomas Graf 			  unsigned int to, struct skb_seq_state *st)
3287677e90edSThomas Graf {
3288677e90edSThomas Graf 	st->lower_offset = from;
3289677e90edSThomas Graf 	st->upper_offset = to;
3290677e90edSThomas Graf 	st->root_skb = st->cur_skb = skb;
3291677e90edSThomas Graf 	st->frag_idx = st->stepped_offset = 0;
3292677e90edSThomas Graf 	st->frag_data = NULL;
3293677e90edSThomas Graf }
3294b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_prepare_seq_read);
3295677e90edSThomas Graf 
3296677e90edSThomas Graf /**
3297677e90edSThomas Graf  * skb_seq_read - Sequentially read skb data
3298677e90edSThomas Graf  * @consumed: number of bytes consumed by the caller so far
3299677e90edSThomas Graf  * @data: destination pointer for data to be returned
3300677e90edSThomas Graf  * @st: state variable
3301677e90edSThomas Graf  *
3302bc32383cSMathias Krause  * Reads a block of skb data at @consumed relative to the
3303677e90edSThomas Graf  * lower offset specified to skb_prepare_seq_read(). Assigns
3304bc32383cSMathias Krause  * the head of the data block to @data and returns the length
3305677e90edSThomas Graf  * of the block or 0 if the end of the skb data or the upper
3306677e90edSThomas Graf  * offset has been reached.
3307677e90edSThomas Graf  *
3308677e90edSThomas Graf  * The caller is not required to consume all of the data
3309bc32383cSMathias Krause  * returned, i.e. @consumed is typically set to the number
3310677e90edSThomas Graf  * of bytes already consumed and the next call to
3311677e90edSThomas Graf  * skb_seq_read() will return the remaining part of the block.
3312677e90edSThomas Graf  *
331325985edcSLucas De Marchi  * Note 1: The size of each block of data returned can be arbitrary,
3314e793c0f7SMasanari Iida  *       this limitation is the cost for zerocopy sequential
3315677e90edSThomas Graf  *       reads of potentially non linear data.
3316677e90edSThomas Graf  *
3317bc2cda1eSRandy Dunlap  * Note 2: Fragment lists within fragments are not implemented
3318677e90edSThomas Graf  *       at the moment, state->root_skb could be replaced with
3319677e90edSThomas Graf  *       a stack for this purpose.
3320677e90edSThomas Graf  */
3321677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3322677e90edSThomas Graf 			  struct skb_seq_state *st)
3323677e90edSThomas Graf {
3324677e90edSThomas Graf 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3325677e90edSThomas Graf 	skb_frag_t *frag;
3326677e90edSThomas Graf 
3327aeb193eaSWedson Almeida Filho 	if (unlikely(abs_offset >= st->upper_offset)) {
3328aeb193eaSWedson Almeida Filho 		if (st->frag_data) {
3329aeb193eaSWedson Almeida Filho 			kunmap_atomic(st->frag_data);
3330aeb193eaSWedson Almeida Filho 			st->frag_data = NULL;
3331aeb193eaSWedson Almeida Filho 		}
3332677e90edSThomas Graf 		return 0;
3333aeb193eaSWedson Almeida Filho 	}
3334677e90edSThomas Graf 
3335677e90edSThomas Graf next_skb:
333695e3b24cSHerbert Xu 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3337677e90edSThomas Graf 
3338995b3379SThomas Chenault 	if (abs_offset < block_limit && !st->frag_data) {
333995e3b24cSHerbert Xu 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3340677e90edSThomas Graf 		return block_limit - abs_offset;
3341677e90edSThomas Graf 	}
3342677e90edSThomas Graf 
3343677e90edSThomas Graf 	if (st->frag_idx == 0 && !st->frag_data)
3344677e90edSThomas Graf 		st->stepped_offset += skb_headlen(st->cur_skb);
3345677e90edSThomas Graf 
3346677e90edSThomas Graf 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3347677e90edSThomas Graf 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
33489e903e08SEric Dumazet 		block_limit = skb_frag_size(frag) + st->stepped_offset;
3349677e90edSThomas Graf 
3350677e90edSThomas Graf 		if (abs_offset < block_limit) {
3351677e90edSThomas Graf 			if (!st->frag_data)
335251c56b00SEric Dumazet 				st->frag_data = kmap_atomic(skb_frag_page(frag));
3353677e90edSThomas Graf 
3354677e90edSThomas Graf 			*data = (u8 *) st->frag_data + frag->page_offset +
3355677e90edSThomas Graf 				(abs_offset - st->stepped_offset);
3356677e90edSThomas Graf 
3357677e90edSThomas Graf 			return block_limit - abs_offset;
3358677e90edSThomas Graf 		}
3359677e90edSThomas Graf 
3360677e90edSThomas Graf 		if (st->frag_data) {
336151c56b00SEric Dumazet 			kunmap_atomic(st->frag_data);
3362677e90edSThomas Graf 			st->frag_data = NULL;
3363677e90edSThomas Graf 		}
3364677e90edSThomas Graf 
3365677e90edSThomas Graf 		st->frag_idx++;
33669e903e08SEric Dumazet 		st->stepped_offset += skb_frag_size(frag);
3367677e90edSThomas Graf 	}
3368677e90edSThomas Graf 
33695b5a60daSOlaf Kirch 	if (st->frag_data) {
337051c56b00SEric Dumazet 		kunmap_atomic(st->frag_data);
33715b5a60daSOlaf Kirch 		st->frag_data = NULL;
33725b5a60daSOlaf Kirch 	}
33735b5a60daSOlaf Kirch 
337421dc3301SDavid S. Miller 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3375677e90edSThomas Graf 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
337695e3b24cSHerbert Xu 		st->frag_idx = 0;
3377677e90edSThomas Graf 		goto next_skb;
337871b3346dSShyam Iyer 	} else if (st->cur_skb->next) {
337971b3346dSShyam Iyer 		st->cur_skb = st->cur_skb->next;
338071b3346dSShyam Iyer 		st->frag_idx = 0;
3381677e90edSThomas Graf 		goto next_skb;
3382677e90edSThomas Graf 	}
3383677e90edSThomas Graf 
3384677e90edSThomas Graf 	return 0;
3385677e90edSThomas Graf }
3386b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read);
3387677e90edSThomas Graf 
3388677e90edSThomas Graf /**
3389677e90edSThomas Graf  * skb_abort_seq_read - Abort a sequential read of skb data
3390677e90edSThomas Graf  * @st: state variable
3391677e90edSThomas Graf  *
3392677e90edSThomas Graf  * Must be called if skb_seq_read() was not called until it
3393677e90edSThomas Graf  * returned 0.
3394677e90edSThomas Graf  */
3395677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st)
3396677e90edSThomas Graf {
3397677e90edSThomas Graf 	if (st->frag_data)
339851c56b00SEric Dumazet 		kunmap_atomic(st->frag_data);
3399677e90edSThomas Graf }
3400b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read);
3401677e90edSThomas Graf 
34023fc7e8a6SThomas Graf #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
34033fc7e8a6SThomas Graf 
34043fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
34053fc7e8a6SThomas Graf 					  struct ts_config *conf,
34063fc7e8a6SThomas Graf 					  struct ts_state *state)
34073fc7e8a6SThomas Graf {
34083fc7e8a6SThomas Graf 	return skb_seq_read(offset, text, TS_SKB_CB(state));
34093fc7e8a6SThomas Graf }
34103fc7e8a6SThomas Graf 
34113fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
34123fc7e8a6SThomas Graf {
34133fc7e8a6SThomas Graf 	skb_abort_seq_read(TS_SKB_CB(state));
34143fc7e8a6SThomas Graf }
34153fc7e8a6SThomas Graf 
34163fc7e8a6SThomas Graf /**
34173fc7e8a6SThomas Graf  * skb_find_text - Find a text pattern in skb data
34183fc7e8a6SThomas Graf  * @skb: the buffer to look in
34193fc7e8a6SThomas Graf  * @from: search offset
34203fc7e8a6SThomas Graf  * @to: search limit
34213fc7e8a6SThomas Graf  * @config: textsearch configuration
34223fc7e8a6SThomas Graf  *
34233fc7e8a6SThomas Graf  * Finds a pattern in the skb data according to the specified
34243fc7e8a6SThomas Graf  * textsearch configuration. Use textsearch_next() to retrieve
34253fc7e8a6SThomas Graf  * subsequent occurrences of the pattern. Returns the offset
34263fc7e8a6SThomas Graf  * to the first occurrence or UINT_MAX if no match was found.
34273fc7e8a6SThomas Graf  */
34283fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3429059a2440SBojan Prtvar 			   unsigned int to, struct ts_config *config)
34303fc7e8a6SThomas Graf {
3431059a2440SBojan Prtvar 	struct ts_state state;
3432f72b948dSPhil Oester 	unsigned int ret;
3433f72b948dSPhil Oester 
34343fc7e8a6SThomas Graf 	config->get_next_block = skb_ts_get_next_block;
34353fc7e8a6SThomas Graf 	config->finish = skb_ts_finish;
34363fc7e8a6SThomas Graf 
3437059a2440SBojan Prtvar 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
34383fc7e8a6SThomas Graf 
3439059a2440SBojan Prtvar 	ret = textsearch_find(config, &state);
3440f72b948dSPhil Oester 	return (ret <= to - from ? ret : UINT_MAX);
34413fc7e8a6SThomas Graf }
3442b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text);
34433fc7e8a6SThomas Graf 
3444be12a1feSHannes Frederic Sowa int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3445be12a1feSHannes Frederic Sowa 			 int offset, size_t size)
3446be12a1feSHannes Frederic Sowa {
3447be12a1feSHannes Frederic Sowa 	int i = skb_shinfo(skb)->nr_frags;
3448be12a1feSHannes Frederic Sowa 
3449be12a1feSHannes Frederic Sowa 	if (skb_can_coalesce(skb, i, page, offset)) {
3450be12a1feSHannes Frederic Sowa 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3451be12a1feSHannes Frederic Sowa 	} else if (i < MAX_SKB_FRAGS) {
3452be12a1feSHannes Frederic Sowa 		get_page(page);
3453be12a1feSHannes Frederic Sowa 		skb_fill_page_desc(skb, i, page, offset, size);
3454be12a1feSHannes Frederic Sowa 	} else {
3455be12a1feSHannes Frederic Sowa 		return -EMSGSIZE;
3456be12a1feSHannes Frederic Sowa 	}
3457be12a1feSHannes Frederic Sowa 
3458be12a1feSHannes Frederic Sowa 	return 0;
3459be12a1feSHannes Frederic Sowa }
3460be12a1feSHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3461be12a1feSHannes Frederic Sowa 
3462cbb042f9SHerbert Xu /**
3463cbb042f9SHerbert Xu  *	skb_pull_rcsum - pull skb and update receive checksum
3464cbb042f9SHerbert Xu  *	@skb: buffer to update
3465cbb042f9SHerbert Xu  *	@len: length of data pulled
3466cbb042f9SHerbert Xu  *
3467cbb042f9SHerbert Xu  *	This function performs an skb_pull on the packet and updates
3468fee54fa5SUrs Thuermann  *	the CHECKSUM_COMPLETE checksum.  It should be used on
346984fa7933SPatrick McHardy  *	receive path processing instead of skb_pull unless you know
347084fa7933SPatrick McHardy  *	that the checksum difference is zero (e.g., a valid IP header)
347184fa7933SPatrick McHardy  *	or you are setting ip_summed to CHECKSUM_NONE.
3472cbb042f9SHerbert Xu  */
3473af72868bSJohannes Berg void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3474cbb042f9SHerbert Xu {
347531b33dfbSPravin B Shelar 	unsigned char *data = skb->data;
347631b33dfbSPravin B Shelar 
3477cbb042f9SHerbert Xu 	BUG_ON(len > skb->len);
347831b33dfbSPravin B Shelar 	__skb_pull(skb, len);
347931b33dfbSPravin B Shelar 	skb_postpull_rcsum(skb, data, len);
348031b33dfbSPravin B Shelar 	return skb->data;
3481cbb042f9SHerbert Xu }
3482f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum);
3483f94691acSArnaldo Carvalho de Melo 
348413acc94eSYonghong Song static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
348513acc94eSYonghong Song {
348613acc94eSYonghong Song 	skb_frag_t head_frag;
348713acc94eSYonghong Song 	struct page *page;
348813acc94eSYonghong Song 
348913acc94eSYonghong Song 	page = virt_to_head_page(frag_skb->head);
349013acc94eSYonghong Song 	head_frag.page.p = page;
349113acc94eSYonghong Song 	head_frag.page_offset = frag_skb->data -
349213acc94eSYonghong Song 		(unsigned char *)page_address(page);
349313acc94eSYonghong Song 	head_frag.size = skb_headlen(frag_skb);
349413acc94eSYonghong Song 	return head_frag;
349513acc94eSYonghong Song }
349613acc94eSYonghong Song 
3497f4c50d99SHerbert Xu /**
3498f4c50d99SHerbert Xu  *	skb_segment - Perform protocol segmentation on skb.
3499df5771ffSMichael S. Tsirkin  *	@head_skb: buffer to segment
3500576a30ebSHerbert Xu  *	@features: features for the output path (see dev->features)
3501f4c50d99SHerbert Xu  *
3502f4c50d99SHerbert Xu  *	This function performs segmentation on the given skb.  It returns
35034c821d75SBen Hutchings  *	a pointer to the first in a list of new skbs for the segments.
35044c821d75SBen Hutchings  *	In case of error it returns ERR_PTR(err).
3505f4c50d99SHerbert Xu  */
3506df5771ffSMichael S. Tsirkin struct sk_buff *skb_segment(struct sk_buff *head_skb,
3507df5771ffSMichael S. Tsirkin 			    netdev_features_t features)
3508f4c50d99SHerbert Xu {
3509f4c50d99SHerbert Xu 	struct sk_buff *segs = NULL;
3510f4c50d99SHerbert Xu 	struct sk_buff *tail = NULL;
35111a4cedafSMichael S. Tsirkin 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3512df5771ffSMichael S. Tsirkin 	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3513df5771ffSMichael S. Tsirkin 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
3514df5771ffSMichael S. Tsirkin 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
35151fd819ecSMichael S. Tsirkin 	struct sk_buff *frag_skb = head_skb;
3516f4c50d99SHerbert Xu 	unsigned int offset = doffset;
3517df5771ffSMichael S. Tsirkin 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3518802ab55aSAlexander Duyck 	unsigned int partial_segs = 0;
3519f4c50d99SHerbert Xu 	unsigned int headroom;
3520802ab55aSAlexander Duyck 	unsigned int len = head_skb->len;
3521ec5f0615SPravin B Shelar 	__be16 proto;
352236c98382SAlexander Duyck 	bool csum, sg;
3523df5771ffSMichael S. Tsirkin 	int nfrags = skb_shinfo(head_skb)->nr_frags;
3524f4c50d99SHerbert Xu 	int err = -ENOMEM;
3525f4c50d99SHerbert Xu 	int i = 0;
3526f4c50d99SHerbert Xu 	int pos;
352753d6471cSVlad Yasevich 	int dummy;
3528f4c50d99SHerbert Xu 
35295882a07cSWei-Chun Chao 	__skb_push(head_skb, doffset);
353053d6471cSVlad Yasevich 	proto = skb_network_protocol(head_skb, &dummy);
3531ec5f0615SPravin B Shelar 	if (unlikely(!proto))
3532ec5f0615SPravin B Shelar 		return ERR_PTR(-EINVAL);
3533ec5f0615SPravin B Shelar 
353436c98382SAlexander Duyck 	sg = !!(features & NETIF_F_SG);
3535f245d079SAlexander Duyck 	csum = !!can_checksum_protocol(features, proto);
35367e2b10c1STom Herbert 
353707b26c94SSteffen Klassert 	if (sg && csum && (mss != GSO_BY_FRAGS))  {
353807b26c94SSteffen Klassert 		if (!(features & NETIF_F_GSO_PARTIAL)) {
353907b26c94SSteffen Klassert 			struct sk_buff *iter;
354043170c4eSIlan Tayari 			unsigned int frag_len;
354107b26c94SSteffen Klassert 
354207b26c94SSteffen Klassert 			if (!list_skb ||
354307b26c94SSteffen Klassert 			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
354407b26c94SSteffen Klassert 				goto normal;
354507b26c94SSteffen Klassert 
354643170c4eSIlan Tayari 			/* If we get here then all the required
354743170c4eSIlan Tayari 			 * GSO features except frag_list are supported.
354843170c4eSIlan Tayari 			 * Try to split the SKB to multiple GSO SKBs
354943170c4eSIlan Tayari 			 * with no frag_list.
355043170c4eSIlan Tayari 			 * Currently we can do that only when the buffers don't
355143170c4eSIlan Tayari 			 * have a linear part and all the buffers except
355243170c4eSIlan Tayari 			 * the last are of the same length.
355307b26c94SSteffen Klassert 			 */
355443170c4eSIlan Tayari 			frag_len = list_skb->len;
355507b26c94SSteffen Klassert 			skb_walk_frags(head_skb, iter) {
355643170c4eSIlan Tayari 				if (frag_len != iter->len && iter->next)
355743170c4eSIlan Tayari 					goto normal;
3558eaffadbbSIlan Tayari 				if (skb_headlen(iter) && !iter->head_frag)
355907b26c94SSteffen Klassert 					goto normal;
356007b26c94SSteffen Klassert 
356107b26c94SSteffen Klassert 				len -= iter->len;
356207b26c94SSteffen Klassert 			}
356343170c4eSIlan Tayari 
356443170c4eSIlan Tayari 			if (len != frag_len)
356543170c4eSIlan Tayari 				goto normal;
356607b26c94SSteffen Klassert 		}
356707b26c94SSteffen Klassert 
3568802ab55aSAlexander Duyck 		/* GSO partial only requires that we trim off any excess that
3569802ab55aSAlexander Duyck 		 * doesn't fit into an MSS sized block, so take care of that
3570802ab55aSAlexander Duyck 		 * now.
3571802ab55aSAlexander Duyck 		 */
3572802ab55aSAlexander Duyck 		partial_segs = len / mss;
3573d7fb5a80SAlexander Duyck 		if (partial_segs > 1)
3574802ab55aSAlexander Duyck 			mss *= partial_segs;
3575d7fb5a80SAlexander Duyck 		else
3576d7fb5a80SAlexander Duyck 			partial_segs = 0;
3577802ab55aSAlexander Duyck 	}
3578802ab55aSAlexander Duyck 
357907b26c94SSteffen Klassert normal:
3580df5771ffSMichael S. Tsirkin 	headroom = skb_headroom(head_skb);
3581df5771ffSMichael S. Tsirkin 	pos = skb_headlen(head_skb);
3582f4c50d99SHerbert Xu 
3583f4c50d99SHerbert Xu 	do {
3584f4c50d99SHerbert Xu 		struct sk_buff *nskb;
35858cb19905SMichael S. Tsirkin 		skb_frag_t *nskb_frag;
3586c8884eddSHerbert Xu 		int hsize;
3587f4c50d99SHerbert Xu 		int size;
3588f4c50d99SHerbert Xu 
35893953c46cSMarcelo Ricardo Leitner 		if (unlikely(mss == GSO_BY_FRAGS)) {
35903953c46cSMarcelo Ricardo Leitner 			len = list_skb->len;
35913953c46cSMarcelo Ricardo Leitner 		} else {
3592df5771ffSMichael S. Tsirkin 			len = head_skb->len - offset;
3593f4c50d99SHerbert Xu 			if (len > mss)
3594f4c50d99SHerbert Xu 				len = mss;
35953953c46cSMarcelo Ricardo Leitner 		}
3596f4c50d99SHerbert Xu 
3597df5771ffSMichael S. Tsirkin 		hsize = skb_headlen(head_skb) - offset;
3598f4c50d99SHerbert Xu 		if (hsize < 0)
3599f4c50d99SHerbert Xu 			hsize = 0;
3600c8884eddSHerbert Xu 		if (hsize > len || !sg)
3601c8884eddSHerbert Xu 			hsize = len;
3602f4c50d99SHerbert Xu 
36031a4cedafSMichael S. Tsirkin 		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
36041a4cedafSMichael S. Tsirkin 		    (skb_headlen(list_skb) == len || sg)) {
36051a4cedafSMichael S. Tsirkin 			BUG_ON(skb_headlen(list_skb) > len);
360689319d38SHerbert Xu 
36079d8506ccSHerbert Xu 			i = 0;
36081a4cedafSMichael S. Tsirkin 			nfrags = skb_shinfo(list_skb)->nr_frags;
36091a4cedafSMichael S. Tsirkin 			frag = skb_shinfo(list_skb)->frags;
36101fd819ecSMichael S. Tsirkin 			frag_skb = list_skb;
36111a4cedafSMichael S. Tsirkin 			pos += skb_headlen(list_skb);
36129d8506ccSHerbert Xu 
36139d8506ccSHerbert Xu 			while (pos < offset + len) {
36149d8506ccSHerbert Xu 				BUG_ON(i >= nfrags);
36159d8506ccSHerbert Xu 
36164e1beba1SMichael S. Tsirkin 				size = skb_frag_size(frag);
36179d8506ccSHerbert Xu 				if (pos + size > offset + len)
36189d8506ccSHerbert Xu 					break;
36199d8506ccSHerbert Xu 
36209d8506ccSHerbert Xu 				i++;
36219d8506ccSHerbert Xu 				pos += size;
36224e1beba1SMichael S. Tsirkin 				frag++;
36239d8506ccSHerbert Xu 			}
36249d8506ccSHerbert Xu 
36251a4cedafSMichael S. Tsirkin 			nskb = skb_clone(list_skb, GFP_ATOMIC);
36261a4cedafSMichael S. Tsirkin 			list_skb = list_skb->next;
362789319d38SHerbert Xu 
3628f4c50d99SHerbert Xu 			if (unlikely(!nskb))
3629f4c50d99SHerbert Xu 				goto err;
3630f4c50d99SHerbert Xu 
36319d8506ccSHerbert Xu 			if (unlikely(pskb_trim(nskb, len))) {
36329d8506ccSHerbert Xu 				kfree_skb(nskb);
36339d8506ccSHerbert Xu 				goto err;
36349d8506ccSHerbert Xu 			}
36359d8506ccSHerbert Xu 
3636ec47ea82SAlexander Duyck 			hsize = skb_end_offset(nskb);
363789319d38SHerbert Xu 			if (skb_cow_head(nskb, doffset + headroom)) {
363889319d38SHerbert Xu 				kfree_skb(nskb);
363989319d38SHerbert Xu 				goto err;
364089319d38SHerbert Xu 			}
364189319d38SHerbert Xu 
3642ec47ea82SAlexander Duyck 			nskb->truesize += skb_end_offset(nskb) - hsize;
364389319d38SHerbert Xu 			skb_release_head_state(nskb);
364489319d38SHerbert Xu 			__skb_push(nskb, doffset);
364589319d38SHerbert Xu 		} else {
3646c93bdd0eSMel Gorman 			nskb = __alloc_skb(hsize + doffset + headroom,
3647df5771ffSMichael S. Tsirkin 					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3648c93bdd0eSMel Gorman 					   NUMA_NO_NODE);
364989319d38SHerbert Xu 
365089319d38SHerbert Xu 			if (unlikely(!nskb))
365189319d38SHerbert Xu 				goto err;
365289319d38SHerbert Xu 
365389319d38SHerbert Xu 			skb_reserve(nskb, headroom);
365489319d38SHerbert Xu 			__skb_put(nskb, doffset);
365589319d38SHerbert Xu 		}
365689319d38SHerbert Xu 
3657f4c50d99SHerbert Xu 		if (segs)
3658f4c50d99SHerbert Xu 			tail->next = nskb;
3659f4c50d99SHerbert Xu 		else
3660f4c50d99SHerbert Xu 			segs = nskb;
3661f4c50d99SHerbert Xu 		tail = nskb;
3662f4c50d99SHerbert Xu 
3663df5771ffSMichael S. Tsirkin 		__copy_skb_header(nskb, head_skb);
3664f4c50d99SHerbert Xu 
3665030737bcSEric Dumazet 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3666fcdfe3a7SVlad Yasevich 		skb_reset_mac_len(nskb);
366768c33163SPravin B Shelar 
3668df5771ffSMichael S. Tsirkin 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
366968c33163SPravin B Shelar 						 nskb->data - tnl_hlen,
367068c33163SPravin B Shelar 						 doffset + tnl_hlen);
367189319d38SHerbert Xu 
36729d8506ccSHerbert Xu 		if (nskb->len == len + doffset)
36731cdbcb79SSimon Horman 			goto perform_csum_check;
367489319d38SHerbert Xu 
36757fbeffedSAlexander Duyck 		if (!sg) {
36767fbeffedSAlexander Duyck 			if (!nskb->remcsum_offload)
36776f85a124SHerbert Xu 				nskb->ip_summed = CHECKSUM_NONE;
367876443456SAlexander Duyck 			SKB_GSO_CB(nskb)->csum =
367976443456SAlexander Duyck 				skb_copy_and_csum_bits(head_skb, offset,
3680f4c50d99SHerbert Xu 						       skb_put(nskb, len),
3681f4c50d99SHerbert Xu 						       len, 0);
36827e2b10c1STom Herbert 			SKB_GSO_CB(nskb)->csum_start =
3683de843723STom Herbert 				skb_headroom(nskb) + doffset;
3684f4c50d99SHerbert Xu 			continue;
3685f4c50d99SHerbert Xu 		}
3686f4c50d99SHerbert Xu 
36878cb19905SMichael S. Tsirkin 		nskb_frag = skb_shinfo(nskb)->frags;
3688f4c50d99SHerbert Xu 
3689df5771ffSMichael S. Tsirkin 		skb_copy_from_linear_data_offset(head_skb, offset,
3690d626f62bSArnaldo Carvalho de Melo 						 skb_put(nskb, hsize), hsize);
3691f4c50d99SHerbert Xu 
3692fff88030SWillem de Bruijn 		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3693df5771ffSMichael S. Tsirkin 					      SKBTX_SHARED_FRAG;
3694cef401deSEric Dumazet 
3695bf5c25d6SWillem de Bruijn 		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3696bf5c25d6SWillem de Bruijn 		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3697bf5c25d6SWillem de Bruijn 			goto err;
3698bf5c25d6SWillem de Bruijn 
36999d8506ccSHerbert Xu 		while (pos < offset + len) {
37009d8506ccSHerbert Xu 			if (i >= nfrags) {
37019d8506ccSHerbert Xu 				i = 0;
37021a4cedafSMichael S. Tsirkin 				nfrags = skb_shinfo(list_skb)->nr_frags;
37031a4cedafSMichael S. Tsirkin 				frag = skb_shinfo(list_skb)->frags;
37041fd819ecSMichael S. Tsirkin 				frag_skb = list_skb;
370513acc94eSYonghong Song 				if (!skb_headlen(list_skb)) {
37069d8506ccSHerbert Xu 					BUG_ON(!nfrags);
370713acc94eSYonghong Song 				} else {
370813acc94eSYonghong Song 					BUG_ON(!list_skb->head_frag);
37099d8506ccSHerbert Xu 
371013acc94eSYonghong Song 					/* to make room for head_frag. */
371113acc94eSYonghong Song 					i--;
371213acc94eSYonghong Song 					frag--;
371313acc94eSYonghong Song 				}
3714bf5c25d6SWillem de Bruijn 				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3715bf5c25d6SWillem de Bruijn 				    skb_zerocopy_clone(nskb, frag_skb,
3716bf5c25d6SWillem de Bruijn 						       GFP_ATOMIC))
3717bf5c25d6SWillem de Bruijn 					goto err;
3718bf5c25d6SWillem de Bruijn 
37191a4cedafSMichael S. Tsirkin 				list_skb = list_skb->next;
37209d8506ccSHerbert Xu 			}
37219d8506ccSHerbert Xu 
37229d8506ccSHerbert Xu 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
37239d8506ccSHerbert Xu 				     MAX_SKB_FRAGS)) {
37249d8506ccSHerbert Xu 				net_warn_ratelimited(
37259d8506ccSHerbert Xu 					"skb_segment: too many frags: %u %u\n",
37269d8506ccSHerbert Xu 					pos, mss);
3727ff907a11SEric Dumazet 				err = -EINVAL;
37289d8506ccSHerbert Xu 				goto err;
37299d8506ccSHerbert Xu 			}
37309d8506ccSHerbert Xu 
373113acc94eSYonghong Song 			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
37328cb19905SMichael S. Tsirkin 			__skb_frag_ref(nskb_frag);
37338cb19905SMichael S. Tsirkin 			size = skb_frag_size(nskb_frag);
3734f4c50d99SHerbert Xu 
3735f4c50d99SHerbert Xu 			if (pos < offset) {
37368cb19905SMichael S. Tsirkin 				nskb_frag->page_offset += offset - pos;
37378cb19905SMichael S. Tsirkin 				skb_frag_size_sub(nskb_frag, offset - pos);
3738f4c50d99SHerbert Xu 			}
3739f4c50d99SHerbert Xu 
374089319d38SHerbert Xu 			skb_shinfo(nskb)->nr_frags++;
3741f4c50d99SHerbert Xu 
3742f4c50d99SHerbert Xu 			if (pos + size <= offset + len) {
3743f4c50d99SHerbert Xu 				i++;
37444e1beba1SMichael S. Tsirkin 				frag++;
3745f4c50d99SHerbert Xu 				pos += size;
3746f4c50d99SHerbert Xu 			} else {
37478cb19905SMichael S. Tsirkin 				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
374889319d38SHerbert Xu 				goto skip_fraglist;
3749f4c50d99SHerbert Xu 			}
3750f4c50d99SHerbert Xu 
37518cb19905SMichael S. Tsirkin 			nskb_frag++;
3752f4c50d99SHerbert Xu 		}
3753f4c50d99SHerbert Xu 
375489319d38SHerbert Xu skip_fraglist:
3755f4c50d99SHerbert Xu 		nskb->data_len = len - hsize;
3756f4c50d99SHerbert Xu 		nskb->len += nskb->data_len;
3757f4c50d99SHerbert Xu 		nskb->truesize += nskb->data_len;
3758ec5f0615SPravin B Shelar 
37591cdbcb79SSimon Horman perform_csum_check:
37607fbeffedSAlexander Duyck 		if (!csum) {
3761ff907a11SEric Dumazet 			if (skb_has_shared_frag(nskb) &&
3762ff907a11SEric Dumazet 			    __skb_linearize(nskb))
3763ddff00d4SAlexander Duyck 				goto err;
3764ff907a11SEric Dumazet 
37657fbeffedSAlexander Duyck 			if (!nskb->remcsum_offload)
3766ec5f0615SPravin B Shelar 				nskb->ip_summed = CHECKSUM_NONE;
376776443456SAlexander Duyck 			SKB_GSO_CB(nskb)->csum =
376876443456SAlexander Duyck 				skb_checksum(nskb, doffset,
376976443456SAlexander Duyck 					     nskb->len - doffset, 0);
37707e2b10c1STom Herbert 			SKB_GSO_CB(nskb)->csum_start =
37717e2b10c1STom Herbert 				skb_headroom(nskb) + doffset;
3772ec5f0615SPravin B Shelar 		}
3773df5771ffSMichael S. Tsirkin 	} while ((offset += len) < head_skb->len);
3774f4c50d99SHerbert Xu 
3775bec3cfdcSEric Dumazet 	/* Some callers want to get the end of the list.
3776bec3cfdcSEric Dumazet 	 * Put it in segs->prev to avoid walking the list.
3777bec3cfdcSEric Dumazet 	 * (see validate_xmit_skb_list() for example)
3778bec3cfdcSEric Dumazet 	 */
3779bec3cfdcSEric Dumazet 	segs->prev = tail;
3780432c856fSToshiaki Makita 
3781802ab55aSAlexander Duyck 	if (partial_segs) {
378207b26c94SSteffen Klassert 		struct sk_buff *iter;
3783802ab55aSAlexander Duyck 		int type = skb_shinfo(head_skb)->gso_type;
378407b26c94SSteffen Klassert 		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
3785802ab55aSAlexander Duyck 
3786802ab55aSAlexander Duyck 		/* Update type to add partial and then remove dodgy if set */
378707b26c94SSteffen Klassert 		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
3788802ab55aSAlexander Duyck 		type &= ~SKB_GSO_DODGY;
3789802ab55aSAlexander Duyck 
3790802ab55aSAlexander Duyck 		/* Update GSO info and prepare to start updating headers on
3791802ab55aSAlexander Duyck 		 * our way back down the stack of protocols.
3792802ab55aSAlexander Duyck 		 */
379307b26c94SSteffen Klassert 		for (iter = segs; iter; iter = iter->next) {
379407b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_size = gso_size;
379507b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_segs = partial_segs;
379607b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_type = type;
379707b26c94SSteffen Klassert 			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
379807b26c94SSteffen Klassert 		}
379907b26c94SSteffen Klassert 
380007b26c94SSteffen Klassert 		if (tail->len - doffset <= gso_size)
380107b26c94SSteffen Klassert 			skb_shinfo(tail)->gso_size = 0;
380207b26c94SSteffen Klassert 		else if (tail != segs)
380307b26c94SSteffen Klassert 			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
3804802ab55aSAlexander Duyck 	}
3805802ab55aSAlexander Duyck 
3806432c856fSToshiaki Makita 	/* Following permits correct backpressure, for protocols
3807432c856fSToshiaki Makita 	 * using skb_set_owner_w().
3808432c856fSToshiaki Makita 	 * Idea is to tranfert ownership from head_skb to last segment.
3809432c856fSToshiaki Makita 	 */
3810432c856fSToshiaki Makita 	if (head_skb->destructor == sock_wfree) {
3811432c856fSToshiaki Makita 		swap(tail->truesize, head_skb->truesize);
3812432c856fSToshiaki Makita 		swap(tail->destructor, head_skb->destructor);
3813432c856fSToshiaki Makita 		swap(tail->sk, head_skb->sk);
3814432c856fSToshiaki Makita 	}
3815f4c50d99SHerbert Xu 	return segs;
3816f4c50d99SHerbert Xu 
3817f4c50d99SHerbert Xu err:
3818289dccbeSEric Dumazet 	kfree_skb_list(segs);
3819f4c50d99SHerbert Xu 	return ERR_PTR(err);
3820f4c50d99SHerbert Xu }
3821f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment);
3822f4c50d99SHerbert Xu 
3823d4546c25SDavid Miller int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
382471d93b39SHerbert Xu {
38258a29111cSEric Dumazet 	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
382667147ba9SHerbert Xu 	unsigned int offset = skb_gro_offset(skb);
382767147ba9SHerbert Xu 	unsigned int headlen = skb_headlen(skb);
38288a29111cSEric Dumazet 	unsigned int len = skb_gro_len(skb);
3829715dc1f3SEric Dumazet 	unsigned int delta_truesize;
3830d4546c25SDavid Miller 	struct sk_buff *lp;
383171d93b39SHerbert Xu 
38320ab03f35SSteffen Klassert 	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
383371d93b39SHerbert Xu 		return -E2BIG;
383471d93b39SHerbert Xu 
383529e98242SEric Dumazet 	lp = NAPI_GRO_CB(p)->last;
38368a29111cSEric Dumazet 	pinfo = skb_shinfo(lp);
38378a29111cSEric Dumazet 
38388a29111cSEric Dumazet 	if (headlen <= offset) {
383942da6994SHerbert Xu 		skb_frag_t *frag;
384066e92fcfSHerbert Xu 		skb_frag_t *frag2;
38419aaa156cSHerbert Xu 		int i = skbinfo->nr_frags;
38429aaa156cSHerbert Xu 		int nr_frags = pinfo->nr_frags + i;
384342da6994SHerbert Xu 
384466e92fcfSHerbert Xu 		if (nr_frags > MAX_SKB_FRAGS)
38458a29111cSEric Dumazet 			goto merge;
384681705ad1SHerbert Xu 
38478a29111cSEric Dumazet 		offset -= headlen;
38489aaa156cSHerbert Xu 		pinfo->nr_frags = nr_frags;
38499aaa156cSHerbert Xu 		skbinfo->nr_frags = 0;
3850f5572068SHerbert Xu 
38519aaa156cSHerbert Xu 		frag = pinfo->frags + nr_frags;
38529aaa156cSHerbert Xu 		frag2 = skbinfo->frags + i;
385366e92fcfSHerbert Xu 		do {
385466e92fcfSHerbert Xu 			*--frag = *--frag2;
385566e92fcfSHerbert Xu 		} while (--i);
385666e92fcfSHerbert Xu 
385766e92fcfSHerbert Xu 		frag->page_offset += offset;
38589e903e08SEric Dumazet 		skb_frag_size_sub(frag, offset);
385966e92fcfSHerbert Xu 
3860715dc1f3SEric Dumazet 		/* all fragments truesize : remove (head size + sk_buff) */
3861ec47ea82SAlexander Duyck 		delta_truesize = skb->truesize -
3862ec47ea82SAlexander Duyck 				 SKB_TRUESIZE(skb_end_offset(skb));
3863715dc1f3SEric Dumazet 
3864f5572068SHerbert Xu 		skb->truesize -= skb->data_len;
3865f5572068SHerbert Xu 		skb->len -= skb->data_len;
3866f5572068SHerbert Xu 		skb->data_len = 0;
3867f5572068SHerbert Xu 
3868715dc1f3SEric Dumazet 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
38695d38a079SHerbert Xu 		goto done;
3870d7e8883cSEric Dumazet 	} else if (skb->head_frag) {
3871d7e8883cSEric Dumazet 		int nr_frags = pinfo->nr_frags;
3872d7e8883cSEric Dumazet 		skb_frag_t *frag = pinfo->frags + nr_frags;
3873d7e8883cSEric Dumazet 		struct page *page = virt_to_head_page(skb->head);
3874d7e8883cSEric Dumazet 		unsigned int first_size = headlen - offset;
3875d7e8883cSEric Dumazet 		unsigned int first_offset;
3876d7e8883cSEric Dumazet 
3877d7e8883cSEric Dumazet 		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
38788a29111cSEric Dumazet 			goto merge;
3879d7e8883cSEric Dumazet 
3880d7e8883cSEric Dumazet 		first_offset = skb->data -
3881d7e8883cSEric Dumazet 			       (unsigned char *)page_address(page) +
3882d7e8883cSEric Dumazet 			       offset;
3883d7e8883cSEric Dumazet 
3884d7e8883cSEric Dumazet 		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3885d7e8883cSEric Dumazet 
3886d7e8883cSEric Dumazet 		frag->page.p	  = page;
3887d7e8883cSEric Dumazet 		frag->page_offset = first_offset;
3888d7e8883cSEric Dumazet 		skb_frag_size_set(frag, first_size);
3889d7e8883cSEric Dumazet 
3890d7e8883cSEric Dumazet 		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3891d7e8883cSEric Dumazet 		/* We dont need to clear skbinfo->nr_frags here */
3892d7e8883cSEric Dumazet 
3893715dc1f3SEric Dumazet 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3894d7e8883cSEric Dumazet 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3895d7e8883cSEric Dumazet 		goto done;
38968a29111cSEric Dumazet 	}
389771d93b39SHerbert Xu 
389871d93b39SHerbert Xu merge:
3899715dc1f3SEric Dumazet 	delta_truesize = skb->truesize;
390067147ba9SHerbert Xu 	if (offset > headlen) {
3901d1dc7abfSMichal Schmidt 		unsigned int eat = offset - headlen;
3902d1dc7abfSMichal Schmidt 
3903d1dc7abfSMichal Schmidt 		skbinfo->frags[0].page_offset += eat;
39049e903e08SEric Dumazet 		skb_frag_size_sub(&skbinfo->frags[0], eat);
3905d1dc7abfSMichal Schmidt 		skb->data_len -= eat;
3906d1dc7abfSMichal Schmidt 		skb->len -= eat;
390767147ba9SHerbert Xu 		offset = headlen;
390856035022SHerbert Xu 	}
390956035022SHerbert Xu 
391067147ba9SHerbert Xu 	__skb_pull(skb, offset);
391156035022SHerbert Xu 
391229e98242SEric Dumazet 	if (NAPI_GRO_CB(p)->last == p)
39138a29111cSEric Dumazet 		skb_shinfo(p)->frag_list = skb;
39148a29111cSEric Dumazet 	else
3915c3c7c254SEric Dumazet 		NAPI_GRO_CB(p)->last->next = skb;
3916c3c7c254SEric Dumazet 	NAPI_GRO_CB(p)->last = skb;
3917f4a775d1SEric Dumazet 	__skb_header_release(skb);
39188a29111cSEric Dumazet 	lp = p;
391971d93b39SHerbert Xu 
39205d38a079SHerbert Xu done:
39215d38a079SHerbert Xu 	NAPI_GRO_CB(p)->count++;
392237fe4732SHerbert Xu 	p->data_len += len;
3923715dc1f3SEric Dumazet 	p->truesize += delta_truesize;
392437fe4732SHerbert Xu 	p->len += len;
39258a29111cSEric Dumazet 	if (lp != p) {
39268a29111cSEric Dumazet 		lp->data_len += len;
39278a29111cSEric Dumazet 		lp->truesize += delta_truesize;
39288a29111cSEric Dumazet 		lp->len += len;
39298a29111cSEric Dumazet 	}
393071d93b39SHerbert Xu 	NAPI_GRO_CB(skb)->same_flow = 1;
393171d93b39SHerbert Xu 	return 0;
393271d93b39SHerbert Xu }
393357c05650SMarcelo Ricardo Leitner EXPORT_SYMBOL_GPL(skb_gro_receive);
393471d93b39SHerbert Xu 
3935df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
3936df5042f4SFlorian Westphal #define SKB_EXT_ALIGN_VALUE	8
3937df5042f4SFlorian Westphal #define SKB_EXT_CHUNKSIZEOF(x)	(ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
3938df5042f4SFlorian Westphal 
3939df5042f4SFlorian Westphal static const u8 skb_ext_type_len[] = {
3940df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3941df5042f4SFlorian Westphal 	[SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
3942df5042f4SFlorian Westphal #endif
39434165079bSFlorian Westphal #ifdef CONFIG_XFRM
39444165079bSFlorian Westphal 	[SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
39454165079bSFlorian Westphal #endif
3946df5042f4SFlorian Westphal };
3947df5042f4SFlorian Westphal 
3948df5042f4SFlorian Westphal static __always_inline unsigned int skb_ext_total_length(void)
3949df5042f4SFlorian Westphal {
3950df5042f4SFlorian Westphal 	return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
3951df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3952df5042f4SFlorian Westphal 		skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
3953df5042f4SFlorian Westphal #endif
39544165079bSFlorian Westphal #ifdef CONFIG_XFRM
39554165079bSFlorian Westphal 		skb_ext_type_len[SKB_EXT_SEC_PATH] +
39564165079bSFlorian Westphal #endif
3957df5042f4SFlorian Westphal 		0;
3958df5042f4SFlorian Westphal }
3959df5042f4SFlorian Westphal 
3960df5042f4SFlorian Westphal static void skb_extensions_init(void)
3961df5042f4SFlorian Westphal {
3962df5042f4SFlorian Westphal 	BUILD_BUG_ON(SKB_EXT_NUM >= 8);
3963df5042f4SFlorian Westphal 	BUILD_BUG_ON(skb_ext_total_length() > 255);
3964df5042f4SFlorian Westphal 
3965df5042f4SFlorian Westphal 	skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
3966df5042f4SFlorian Westphal 					     SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
3967df5042f4SFlorian Westphal 					     0,
3968df5042f4SFlorian Westphal 					     SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3969df5042f4SFlorian Westphal 					     NULL);
3970df5042f4SFlorian Westphal }
3971df5042f4SFlorian Westphal #else
3972df5042f4SFlorian Westphal static void skb_extensions_init(void) {}
3973df5042f4SFlorian Westphal #endif
3974df5042f4SFlorian Westphal 
39751da177e4SLinus Torvalds void __init skb_init(void)
39761da177e4SLinus Torvalds {
397779a8a642SKees Cook 	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
39781da177e4SLinus Torvalds 					      sizeof(struct sk_buff),
39791da177e4SLinus Torvalds 					      0,
3980e5d679f3SAlexey Dobriyan 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
398179a8a642SKees Cook 					      offsetof(struct sk_buff, cb),
398279a8a642SKees Cook 					      sizeof_field(struct sk_buff, cb),
398320c2df83SPaul Mundt 					      NULL);
3984d179cd12SDavid S. Miller 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3985d0bf4a9eSEric Dumazet 						sizeof(struct sk_buff_fclones),
3986d179cd12SDavid S. Miller 						0,
3987e5d679f3SAlexey Dobriyan 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
398820c2df83SPaul Mundt 						NULL);
3989df5042f4SFlorian Westphal 	skb_extensions_init();
39901da177e4SLinus Torvalds }
39911da177e4SLinus Torvalds 
399251c739d1SDavid S. Miller static int
399348a1df65SJason A. Donenfeld __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
399448a1df65SJason A. Donenfeld 	       unsigned int recursion_level)
3995716ea3a7SDavid Howells {
39961a028e50SDavid S. Miller 	int start = skb_headlen(skb);
39971a028e50SDavid S. Miller 	int i, copy = start - offset;
3998fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
3999716ea3a7SDavid Howells 	int elt = 0;
4000716ea3a7SDavid Howells 
400148a1df65SJason A. Donenfeld 	if (unlikely(recursion_level >= 24))
400248a1df65SJason A. Donenfeld 		return -EMSGSIZE;
400348a1df65SJason A. Donenfeld 
4004716ea3a7SDavid Howells 	if (copy > 0) {
4005716ea3a7SDavid Howells 		if (copy > len)
4006716ea3a7SDavid Howells 			copy = len;
4007642f1490SJens Axboe 		sg_set_buf(sg, skb->data + offset, copy);
4008716ea3a7SDavid Howells 		elt++;
4009716ea3a7SDavid Howells 		if ((len -= copy) == 0)
4010716ea3a7SDavid Howells 			return elt;
4011716ea3a7SDavid Howells 		offset += copy;
4012716ea3a7SDavid Howells 	}
4013716ea3a7SDavid Howells 
4014716ea3a7SDavid Howells 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
40151a028e50SDavid S. Miller 		int end;
4016716ea3a7SDavid Howells 
4017547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
40181a028e50SDavid S. Miller 
40199e903e08SEric Dumazet 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4020716ea3a7SDavid Howells 		if ((copy = end - offset) > 0) {
4021716ea3a7SDavid Howells 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
402248a1df65SJason A. Donenfeld 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
402348a1df65SJason A. Donenfeld 				return -EMSGSIZE;
4024716ea3a7SDavid Howells 
4025716ea3a7SDavid Howells 			if (copy > len)
4026716ea3a7SDavid Howells 				copy = len;
4027ea2ab693SIan Campbell 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4028642f1490SJens Axboe 					frag->page_offset+offset-start);
4029716ea3a7SDavid Howells 			elt++;
4030716ea3a7SDavid Howells 			if (!(len -= copy))
4031716ea3a7SDavid Howells 				return elt;
4032716ea3a7SDavid Howells 			offset += copy;
4033716ea3a7SDavid Howells 		}
40341a028e50SDavid S. Miller 		start = end;
4035716ea3a7SDavid Howells 	}
4036716ea3a7SDavid Howells 
4037fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
403848a1df65SJason A. Donenfeld 		int end, ret;
4039716ea3a7SDavid Howells 
4040547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
40411a028e50SDavid S. Miller 
4042fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
4043716ea3a7SDavid Howells 		if ((copy = end - offset) > 0) {
404448a1df65SJason A. Donenfeld 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
404548a1df65SJason A. Donenfeld 				return -EMSGSIZE;
404648a1df65SJason A. Donenfeld 
4047716ea3a7SDavid Howells 			if (copy > len)
4048716ea3a7SDavid Howells 				copy = len;
404948a1df65SJason A. Donenfeld 			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
405048a1df65SJason A. Donenfeld 					      copy, recursion_level + 1);
405148a1df65SJason A. Donenfeld 			if (unlikely(ret < 0))
405248a1df65SJason A. Donenfeld 				return ret;
405348a1df65SJason A. Donenfeld 			elt += ret;
4054716ea3a7SDavid Howells 			if ((len -= copy) == 0)
4055716ea3a7SDavid Howells 				return elt;
4056716ea3a7SDavid Howells 			offset += copy;
4057716ea3a7SDavid Howells 		}
40581a028e50SDavid S. Miller 		start = end;
4059716ea3a7SDavid Howells 	}
4060716ea3a7SDavid Howells 	BUG_ON(len);
4061716ea3a7SDavid Howells 	return elt;
4062716ea3a7SDavid Howells }
4063716ea3a7SDavid Howells 
406448a1df65SJason A. Donenfeld /**
406548a1df65SJason A. Donenfeld  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
406648a1df65SJason A. Donenfeld  *	@skb: Socket buffer containing the buffers to be mapped
406748a1df65SJason A. Donenfeld  *	@sg: The scatter-gather list to map into
406848a1df65SJason A. Donenfeld  *	@offset: The offset into the buffer's contents to start mapping
406948a1df65SJason A. Donenfeld  *	@len: Length of buffer space to be mapped
407048a1df65SJason A. Donenfeld  *
407148a1df65SJason A. Donenfeld  *	Fill the specified scatter-gather list with mappings/pointers into a
407248a1df65SJason A. Donenfeld  *	region of the buffer space attached to a socket buffer. Returns either
407348a1df65SJason A. Donenfeld  *	the number of scatterlist items used, or -EMSGSIZE if the contents
407448a1df65SJason A. Donenfeld  *	could not fit.
407548a1df65SJason A. Donenfeld  */
407648a1df65SJason A. Donenfeld int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
407748a1df65SJason A. Donenfeld {
407848a1df65SJason A. Donenfeld 	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
407948a1df65SJason A. Donenfeld 
408048a1df65SJason A. Donenfeld 	if (nsg <= 0)
408148a1df65SJason A. Donenfeld 		return nsg;
408248a1df65SJason A. Donenfeld 
408348a1df65SJason A. Donenfeld 	sg_mark_end(&sg[nsg - 1]);
408448a1df65SJason A. Donenfeld 
408548a1df65SJason A. Donenfeld 	return nsg;
408648a1df65SJason A. Donenfeld }
408748a1df65SJason A. Donenfeld EXPORT_SYMBOL_GPL(skb_to_sgvec);
408848a1df65SJason A. Donenfeld 
408925a91d8dSFan Du /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
409025a91d8dSFan Du  * sglist without mark the sg which contain last skb data as the end.
409125a91d8dSFan Du  * So the caller can mannipulate sg list as will when padding new data after
409225a91d8dSFan Du  * the first call without calling sg_unmark_end to expend sg list.
409325a91d8dSFan Du  *
409425a91d8dSFan Du  * Scenario to use skb_to_sgvec_nomark:
409525a91d8dSFan Du  * 1. sg_init_table
409625a91d8dSFan Du  * 2. skb_to_sgvec_nomark(payload1)
409725a91d8dSFan Du  * 3. skb_to_sgvec_nomark(payload2)
409825a91d8dSFan Du  *
409925a91d8dSFan Du  * This is equivalent to:
410025a91d8dSFan Du  * 1. sg_init_table
410125a91d8dSFan Du  * 2. skb_to_sgvec(payload1)
410225a91d8dSFan Du  * 3. sg_unmark_end
410325a91d8dSFan Du  * 4. skb_to_sgvec(payload2)
410425a91d8dSFan Du  *
410525a91d8dSFan Du  * When mapping mutilple payload conditionally, skb_to_sgvec_nomark
410625a91d8dSFan Du  * is more preferable.
410725a91d8dSFan Du  */
410825a91d8dSFan Du int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
410925a91d8dSFan Du 			int offset, int len)
411025a91d8dSFan Du {
411148a1df65SJason A. Donenfeld 	return __skb_to_sgvec(skb, sg, offset, len, 0);
411225a91d8dSFan Du }
411325a91d8dSFan Du EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
411425a91d8dSFan Du 
411551c739d1SDavid S. Miller 
411651c739d1SDavid S. Miller 
4117716ea3a7SDavid Howells /**
4118716ea3a7SDavid Howells  *	skb_cow_data - Check that a socket buffer's data buffers are writable
4119716ea3a7SDavid Howells  *	@skb: The socket buffer to check.
4120716ea3a7SDavid Howells  *	@tailbits: Amount of trailing space to be added
4121716ea3a7SDavid Howells  *	@trailer: Returned pointer to the skb where the @tailbits space begins
4122716ea3a7SDavid Howells  *
4123716ea3a7SDavid Howells  *	Make sure that the data buffers attached to a socket buffer are
4124716ea3a7SDavid Howells  *	writable. If they are not, private copies are made of the data buffers
4125716ea3a7SDavid Howells  *	and the socket buffer is set to use these instead.
4126716ea3a7SDavid Howells  *
4127716ea3a7SDavid Howells  *	If @tailbits is given, make sure that there is space to write @tailbits
4128716ea3a7SDavid Howells  *	bytes of data beyond current end of socket buffer.  @trailer will be
4129716ea3a7SDavid Howells  *	set to point to the skb in which this space begins.
4130716ea3a7SDavid Howells  *
4131716ea3a7SDavid Howells  *	The number of scatterlist elements required to completely map the
4132716ea3a7SDavid Howells  *	COW'd and extended socket buffer will be returned.
4133716ea3a7SDavid Howells  */
4134716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4135716ea3a7SDavid Howells {
4136716ea3a7SDavid Howells 	int copyflag;
4137716ea3a7SDavid Howells 	int elt;
4138716ea3a7SDavid Howells 	struct sk_buff *skb1, **skb_p;
4139716ea3a7SDavid Howells 
4140716ea3a7SDavid Howells 	/* If skb is cloned or its head is paged, reallocate
4141716ea3a7SDavid Howells 	 * head pulling out all the pages (pages are considered not writable
4142716ea3a7SDavid Howells 	 * at the moment even if they are anonymous).
4143716ea3a7SDavid Howells 	 */
4144716ea3a7SDavid Howells 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4145716ea3a7SDavid Howells 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
4146716ea3a7SDavid Howells 		return -ENOMEM;
4147716ea3a7SDavid Howells 
4148716ea3a7SDavid Howells 	/* Easy case. Most of packets will go this way. */
414921dc3301SDavid S. Miller 	if (!skb_has_frag_list(skb)) {
4150716ea3a7SDavid Howells 		/* A little of trouble, not enough of space for trailer.
4151716ea3a7SDavid Howells 		 * This should not happen, when stack is tuned to generate
4152716ea3a7SDavid Howells 		 * good frames. OK, on miss we reallocate and reserve even more
4153716ea3a7SDavid Howells 		 * space, 128 bytes is fair. */
4154716ea3a7SDavid Howells 
4155716ea3a7SDavid Howells 		if (skb_tailroom(skb) < tailbits &&
4156716ea3a7SDavid Howells 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4157716ea3a7SDavid Howells 			return -ENOMEM;
4158716ea3a7SDavid Howells 
4159716ea3a7SDavid Howells 		/* Voila! */
4160716ea3a7SDavid Howells 		*trailer = skb;
4161716ea3a7SDavid Howells 		return 1;
4162716ea3a7SDavid Howells 	}
4163716ea3a7SDavid Howells 
4164716ea3a7SDavid Howells 	/* Misery. We are in troubles, going to mincer fragments... */
4165716ea3a7SDavid Howells 
4166716ea3a7SDavid Howells 	elt = 1;
4167716ea3a7SDavid Howells 	skb_p = &skb_shinfo(skb)->frag_list;
4168716ea3a7SDavid Howells 	copyflag = 0;
4169716ea3a7SDavid Howells 
4170716ea3a7SDavid Howells 	while ((skb1 = *skb_p) != NULL) {
4171716ea3a7SDavid Howells 		int ntail = 0;
4172716ea3a7SDavid Howells 
4173716ea3a7SDavid Howells 		/* The fragment is partially pulled by someone,
4174716ea3a7SDavid Howells 		 * this can happen on input. Copy it and everything
4175716ea3a7SDavid Howells 		 * after it. */
4176716ea3a7SDavid Howells 
4177716ea3a7SDavid Howells 		if (skb_shared(skb1))
4178716ea3a7SDavid Howells 			copyflag = 1;
4179716ea3a7SDavid Howells 
4180716ea3a7SDavid Howells 		/* If the skb is the last, worry about trailer. */
4181716ea3a7SDavid Howells 
4182716ea3a7SDavid Howells 		if (skb1->next == NULL && tailbits) {
4183716ea3a7SDavid Howells 			if (skb_shinfo(skb1)->nr_frags ||
418421dc3301SDavid S. Miller 			    skb_has_frag_list(skb1) ||
4185716ea3a7SDavid Howells 			    skb_tailroom(skb1) < tailbits)
4186716ea3a7SDavid Howells 				ntail = tailbits + 128;
4187716ea3a7SDavid Howells 		}
4188716ea3a7SDavid Howells 
4189716ea3a7SDavid Howells 		if (copyflag ||
4190716ea3a7SDavid Howells 		    skb_cloned(skb1) ||
4191716ea3a7SDavid Howells 		    ntail ||
4192716ea3a7SDavid Howells 		    skb_shinfo(skb1)->nr_frags ||
419321dc3301SDavid S. Miller 		    skb_has_frag_list(skb1)) {
4194716ea3a7SDavid Howells 			struct sk_buff *skb2;
4195716ea3a7SDavid Howells 
4196716ea3a7SDavid Howells 			/* Fuck, we are miserable poor guys... */
4197716ea3a7SDavid Howells 			if (ntail == 0)
4198716ea3a7SDavid Howells 				skb2 = skb_copy(skb1, GFP_ATOMIC);
4199716ea3a7SDavid Howells 			else
4200716ea3a7SDavid Howells 				skb2 = skb_copy_expand(skb1,
4201716ea3a7SDavid Howells 						       skb_headroom(skb1),
4202716ea3a7SDavid Howells 						       ntail,
4203716ea3a7SDavid Howells 						       GFP_ATOMIC);
4204716ea3a7SDavid Howells 			if (unlikely(skb2 == NULL))
4205716ea3a7SDavid Howells 				return -ENOMEM;
4206716ea3a7SDavid Howells 
4207716ea3a7SDavid Howells 			if (skb1->sk)
4208716ea3a7SDavid Howells 				skb_set_owner_w(skb2, skb1->sk);
4209716ea3a7SDavid Howells 
4210716ea3a7SDavid Howells 			/* Looking around. Are we still alive?
4211716ea3a7SDavid Howells 			 * OK, link new skb, drop old one */
4212716ea3a7SDavid Howells 
4213716ea3a7SDavid Howells 			skb2->next = skb1->next;
4214716ea3a7SDavid Howells 			*skb_p = skb2;
4215716ea3a7SDavid Howells 			kfree_skb(skb1);
4216716ea3a7SDavid Howells 			skb1 = skb2;
4217716ea3a7SDavid Howells 		}
4218716ea3a7SDavid Howells 		elt++;
4219716ea3a7SDavid Howells 		*trailer = skb1;
4220716ea3a7SDavid Howells 		skb_p = &skb1->next;
4221716ea3a7SDavid Howells 	}
4222716ea3a7SDavid Howells 
4223716ea3a7SDavid Howells 	return elt;
4224716ea3a7SDavid Howells }
4225b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data);
4226716ea3a7SDavid Howells 
4227b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb)
4228b1faf566SEric Dumazet {
4229b1faf566SEric Dumazet 	struct sock *sk = skb->sk;
4230b1faf566SEric Dumazet 
4231b1faf566SEric Dumazet 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4232b1faf566SEric Dumazet }
4233b1faf566SEric Dumazet 
42348605330aSSoheil Hassas Yeganeh static void skb_set_err_queue(struct sk_buff *skb)
42358605330aSSoheil Hassas Yeganeh {
42368605330aSSoheil Hassas Yeganeh 	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
42378605330aSSoheil Hassas Yeganeh 	 * So, it is safe to (mis)use it to mark skbs on the error queue.
42388605330aSSoheil Hassas Yeganeh 	 */
42398605330aSSoheil Hassas Yeganeh 	skb->pkt_type = PACKET_OUTGOING;
42408605330aSSoheil Hassas Yeganeh 	BUILD_BUG_ON(PACKET_OUTGOING == 0);
42418605330aSSoheil Hassas Yeganeh }
42428605330aSSoheil Hassas Yeganeh 
4243b1faf566SEric Dumazet /*
4244b1faf566SEric Dumazet  * Note: We dont mem charge error packets (no sk_forward_alloc changes)
4245b1faf566SEric Dumazet  */
4246b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4247b1faf566SEric Dumazet {
4248b1faf566SEric Dumazet 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
424995c96174SEric Dumazet 	    (unsigned int)sk->sk_rcvbuf)
4250b1faf566SEric Dumazet 		return -ENOMEM;
4251b1faf566SEric Dumazet 
4252b1faf566SEric Dumazet 	skb_orphan(skb);
4253b1faf566SEric Dumazet 	skb->sk = sk;
4254b1faf566SEric Dumazet 	skb->destructor = sock_rmem_free;
4255b1faf566SEric Dumazet 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
42568605330aSSoheil Hassas Yeganeh 	skb_set_err_queue(skb);
4257b1faf566SEric Dumazet 
4258abb57ea4SEric Dumazet 	/* before exiting rcu section, make sure dst is refcounted */
4259abb57ea4SEric Dumazet 	skb_dst_force(skb);
4260abb57ea4SEric Dumazet 
4261b1faf566SEric Dumazet 	skb_queue_tail(&sk->sk_error_queue, skb);
4262b1faf566SEric Dumazet 	if (!sock_flag(sk, SOCK_DEAD))
42636e5d58fdSVinicius Costa Gomes 		sk->sk_error_report(sk);
4264b1faf566SEric Dumazet 	return 0;
4265b1faf566SEric Dumazet }
4266b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb);
4267b1faf566SEric Dumazet 
426883a1a1a7SSoheil Hassas Yeganeh static bool is_icmp_err_skb(const struct sk_buff *skb)
426983a1a1a7SSoheil Hassas Yeganeh {
427083a1a1a7SSoheil Hassas Yeganeh 	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
427183a1a1a7SSoheil Hassas Yeganeh 		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
427283a1a1a7SSoheil Hassas Yeganeh }
427383a1a1a7SSoheil Hassas Yeganeh 
4274364a9e93SWillem de Bruijn struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4275364a9e93SWillem de Bruijn {
4276364a9e93SWillem de Bruijn 	struct sk_buff_head *q = &sk->sk_error_queue;
427783a1a1a7SSoheil Hassas Yeganeh 	struct sk_buff *skb, *skb_next = NULL;
427883a1a1a7SSoheil Hassas Yeganeh 	bool icmp_next = false;
4279997d5c3fSEric Dumazet 	unsigned long flags;
4280364a9e93SWillem de Bruijn 
4281997d5c3fSEric Dumazet 	spin_lock_irqsave(&q->lock, flags);
4282364a9e93SWillem de Bruijn 	skb = __skb_dequeue(q);
428338b25793SSoheil Hassas Yeganeh 	if (skb && (skb_next = skb_peek(q))) {
428483a1a1a7SSoheil Hassas Yeganeh 		icmp_next = is_icmp_err_skb(skb_next);
428538b25793SSoheil Hassas Yeganeh 		if (icmp_next)
428638b25793SSoheil Hassas Yeganeh 			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
428738b25793SSoheil Hassas Yeganeh 	}
4288997d5c3fSEric Dumazet 	spin_unlock_irqrestore(&q->lock, flags);
4289364a9e93SWillem de Bruijn 
429083a1a1a7SSoheil Hassas Yeganeh 	if (is_icmp_err_skb(skb) && !icmp_next)
429183a1a1a7SSoheil Hassas Yeganeh 		sk->sk_err = 0;
429283a1a1a7SSoheil Hassas Yeganeh 
429383a1a1a7SSoheil Hassas Yeganeh 	if (skb_next)
4294364a9e93SWillem de Bruijn 		sk->sk_error_report(sk);
4295364a9e93SWillem de Bruijn 
4296364a9e93SWillem de Bruijn 	return skb;
4297364a9e93SWillem de Bruijn }
4298364a9e93SWillem de Bruijn EXPORT_SYMBOL(sock_dequeue_err_skb);
4299364a9e93SWillem de Bruijn 
4300cab41c47SAlexander Duyck /**
4301cab41c47SAlexander Duyck  * skb_clone_sk - create clone of skb, and take reference to socket
4302cab41c47SAlexander Duyck  * @skb: the skb to clone
4303cab41c47SAlexander Duyck  *
4304cab41c47SAlexander Duyck  * This function creates a clone of a buffer that holds a reference on
4305cab41c47SAlexander Duyck  * sk_refcnt.  Buffers created via this function are meant to be
4306cab41c47SAlexander Duyck  * returned using sock_queue_err_skb, or free via kfree_skb.
4307cab41c47SAlexander Duyck  *
4308cab41c47SAlexander Duyck  * When passing buffers allocated with this function to sock_queue_err_skb
4309cab41c47SAlexander Duyck  * it is necessary to wrap the call with sock_hold/sock_put in order to
4310cab41c47SAlexander Duyck  * prevent the socket from being released prior to being enqueued on
4311cab41c47SAlexander Duyck  * the sk_error_queue.
4312cab41c47SAlexander Duyck  */
431362bccb8cSAlexander Duyck struct sk_buff *skb_clone_sk(struct sk_buff *skb)
431462bccb8cSAlexander Duyck {
431562bccb8cSAlexander Duyck 	struct sock *sk = skb->sk;
431662bccb8cSAlexander Duyck 	struct sk_buff *clone;
431762bccb8cSAlexander Duyck 
431841c6d650SReshetova, Elena 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
431962bccb8cSAlexander Duyck 		return NULL;
432062bccb8cSAlexander Duyck 
432162bccb8cSAlexander Duyck 	clone = skb_clone(skb, GFP_ATOMIC);
432262bccb8cSAlexander Duyck 	if (!clone) {
432362bccb8cSAlexander Duyck 		sock_put(sk);
432462bccb8cSAlexander Duyck 		return NULL;
432562bccb8cSAlexander Duyck 	}
432662bccb8cSAlexander Duyck 
432762bccb8cSAlexander Duyck 	clone->sk = sk;
432862bccb8cSAlexander Duyck 	clone->destructor = sock_efree;
432962bccb8cSAlexander Duyck 
433062bccb8cSAlexander Duyck 	return clone;
433162bccb8cSAlexander Duyck }
433262bccb8cSAlexander Duyck EXPORT_SYMBOL(skb_clone_sk);
433362bccb8cSAlexander Duyck 
433437846ef0SAlexander Duyck static void __skb_complete_tx_timestamp(struct sk_buff *skb,
433537846ef0SAlexander Duyck 					struct sock *sk,
43364ef1b286SSoheil Hassas Yeganeh 					int tstype,
43374ef1b286SSoheil Hassas Yeganeh 					bool opt_stats)
4338ac45f602SPatrick Ohly {
4339ac45f602SPatrick Ohly 	struct sock_exterr_skb *serr;
4340ac45f602SPatrick Ohly 	int err;
4341ac45f602SPatrick Ohly 
43424ef1b286SSoheil Hassas Yeganeh 	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
43434ef1b286SSoheil Hassas Yeganeh 
4344ac45f602SPatrick Ohly 	serr = SKB_EXT_ERR(skb);
4345ac45f602SPatrick Ohly 	memset(serr, 0, sizeof(*serr));
4346ac45f602SPatrick Ohly 	serr->ee.ee_errno = ENOMSG;
4347ac45f602SPatrick Ohly 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4348e7fd2885SWillem de Bruijn 	serr->ee.ee_info = tstype;
43494ef1b286SSoheil Hassas Yeganeh 	serr->opt_stats = opt_stats;
43501862d620SWillem de Bruijn 	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
43514ed2d765SWillem de Bruijn 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
435209c2d251SWillem de Bruijn 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
4353ac5cc977SWANG Cong 		if (sk->sk_protocol == IPPROTO_TCP &&
4354ac5cc977SWANG Cong 		    sk->sk_type == SOCK_STREAM)
43554ed2d765SWillem de Bruijn 			serr->ee.ee_data -= sk->sk_tskey;
43564ed2d765SWillem de Bruijn 	}
435729030374SEric Dumazet 
4358ac45f602SPatrick Ohly 	err = sock_queue_err_skb(sk, skb);
435929030374SEric Dumazet 
4360ac45f602SPatrick Ohly 	if (err)
4361ac45f602SPatrick Ohly 		kfree_skb(skb);
4362ac45f602SPatrick Ohly }
436337846ef0SAlexander Duyck 
4364b245be1fSWillem de Bruijn static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4365b245be1fSWillem de Bruijn {
4366b245be1fSWillem de Bruijn 	bool ret;
4367b245be1fSWillem de Bruijn 
4368b245be1fSWillem de Bruijn 	if (likely(sysctl_tstamp_allow_data || tsonly))
4369b245be1fSWillem de Bruijn 		return true;
4370b245be1fSWillem de Bruijn 
4371b245be1fSWillem de Bruijn 	read_lock_bh(&sk->sk_callback_lock);
4372b245be1fSWillem de Bruijn 	ret = sk->sk_socket && sk->sk_socket->file &&
4373b245be1fSWillem de Bruijn 	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4374b245be1fSWillem de Bruijn 	read_unlock_bh(&sk->sk_callback_lock);
4375b245be1fSWillem de Bruijn 	return ret;
4376b245be1fSWillem de Bruijn }
4377b245be1fSWillem de Bruijn 
437837846ef0SAlexander Duyck void skb_complete_tx_timestamp(struct sk_buff *skb,
437937846ef0SAlexander Duyck 			       struct skb_shared_hwtstamps *hwtstamps)
438037846ef0SAlexander Duyck {
438137846ef0SAlexander Duyck 	struct sock *sk = skb->sk;
438237846ef0SAlexander Duyck 
4383b245be1fSWillem de Bruijn 	if (!skb_may_tx_timestamp(sk, false))
438435b99dffSWillem de Bruijn 		goto err;
4385b245be1fSWillem de Bruijn 
43869ac25fc0SEric Dumazet 	/* Take a reference to prevent skb_orphan() from freeing the socket,
43879ac25fc0SEric Dumazet 	 * but only if the socket refcount is not zero.
43889ac25fc0SEric Dumazet 	 */
438941c6d650SReshetova, Elena 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
439037846ef0SAlexander Duyck 		*skb_hwtstamps(skb) = *hwtstamps;
43914ef1b286SSoheil Hassas Yeganeh 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
439237846ef0SAlexander Duyck 		sock_put(sk);
439335b99dffSWillem de Bruijn 		return;
439437846ef0SAlexander Duyck 	}
439535b99dffSWillem de Bruijn 
439635b99dffSWillem de Bruijn err:
439735b99dffSWillem de Bruijn 	kfree_skb(skb);
43989ac25fc0SEric Dumazet }
439937846ef0SAlexander Duyck EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
440037846ef0SAlexander Duyck 
440137846ef0SAlexander Duyck void __skb_tstamp_tx(struct sk_buff *orig_skb,
440237846ef0SAlexander Duyck 		     struct skb_shared_hwtstamps *hwtstamps,
440337846ef0SAlexander Duyck 		     struct sock *sk, int tstype)
440437846ef0SAlexander Duyck {
440537846ef0SAlexander Duyck 	struct sk_buff *skb;
44064ef1b286SSoheil Hassas Yeganeh 	bool tsonly, opt_stats = false;
440737846ef0SAlexander Duyck 
44083a8dd971SWillem de Bruijn 	if (!sk)
44093a8dd971SWillem de Bruijn 		return;
44103a8dd971SWillem de Bruijn 
4411b50a5c70SMiroslav Lichvar 	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4412b50a5c70SMiroslav Lichvar 	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4413b50a5c70SMiroslav Lichvar 		return;
4414b50a5c70SMiroslav Lichvar 
44153a8dd971SWillem de Bruijn 	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
44163a8dd971SWillem de Bruijn 	if (!skb_may_tx_timestamp(sk, tsonly))
441737846ef0SAlexander Duyck 		return;
441837846ef0SAlexander Duyck 
44191c885808SFrancis Yan 	if (tsonly) {
44201c885808SFrancis Yan #ifdef CONFIG_INET
44211c885808SFrancis Yan 		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
44221c885808SFrancis Yan 		    sk->sk_protocol == IPPROTO_TCP &&
44234ef1b286SSoheil Hassas Yeganeh 		    sk->sk_type == SOCK_STREAM) {
44241c885808SFrancis Yan 			skb = tcp_get_timestamping_opt_stats(sk);
44254ef1b286SSoheil Hassas Yeganeh 			opt_stats = true;
44264ef1b286SSoheil Hassas Yeganeh 		} else
44271c885808SFrancis Yan #endif
44281c885808SFrancis Yan 			skb = alloc_skb(0, GFP_ATOMIC);
44291c885808SFrancis Yan 	} else {
443037846ef0SAlexander Duyck 		skb = skb_clone(orig_skb, GFP_ATOMIC);
44311c885808SFrancis Yan 	}
443237846ef0SAlexander Duyck 	if (!skb)
443337846ef0SAlexander Duyck 		return;
443437846ef0SAlexander Duyck 
443549ca0d8bSWillem de Bruijn 	if (tsonly) {
4436fff88030SWillem de Bruijn 		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4437fff88030SWillem de Bruijn 					     SKBTX_ANY_TSTAMP;
443849ca0d8bSWillem de Bruijn 		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
443949ca0d8bSWillem de Bruijn 	}
444049ca0d8bSWillem de Bruijn 
444149ca0d8bSWillem de Bruijn 	if (hwtstamps)
444249ca0d8bSWillem de Bruijn 		*skb_hwtstamps(skb) = *hwtstamps;
444349ca0d8bSWillem de Bruijn 	else
444449ca0d8bSWillem de Bruijn 		skb->tstamp = ktime_get_real();
444549ca0d8bSWillem de Bruijn 
44464ef1b286SSoheil Hassas Yeganeh 	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
444737846ef0SAlexander Duyck }
4448e7fd2885SWillem de Bruijn EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4449e7fd2885SWillem de Bruijn 
4450e7fd2885SWillem de Bruijn void skb_tstamp_tx(struct sk_buff *orig_skb,
4451e7fd2885SWillem de Bruijn 		   struct skb_shared_hwtstamps *hwtstamps)
4452e7fd2885SWillem de Bruijn {
4453e7fd2885SWillem de Bruijn 	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4454e7fd2885SWillem de Bruijn 			       SCM_TSTAMP_SND);
4455e7fd2885SWillem de Bruijn }
4456ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx);
4457ac45f602SPatrick Ohly 
44586e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
44596e3e939fSJohannes Berg {
44606e3e939fSJohannes Berg 	struct sock *sk = skb->sk;
44616e3e939fSJohannes Berg 	struct sock_exterr_skb *serr;
4462dd4f1072SEric Dumazet 	int err = 1;
44636e3e939fSJohannes Berg 
44646e3e939fSJohannes Berg 	skb->wifi_acked_valid = 1;
44656e3e939fSJohannes Berg 	skb->wifi_acked = acked;
44666e3e939fSJohannes Berg 
44676e3e939fSJohannes Berg 	serr = SKB_EXT_ERR(skb);
44686e3e939fSJohannes Berg 	memset(serr, 0, sizeof(*serr));
44696e3e939fSJohannes Berg 	serr->ee.ee_errno = ENOMSG;
44706e3e939fSJohannes Berg 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
44716e3e939fSJohannes Berg 
4472dd4f1072SEric Dumazet 	/* Take a reference to prevent skb_orphan() from freeing the socket,
4473dd4f1072SEric Dumazet 	 * but only if the socket refcount is not zero.
4474dd4f1072SEric Dumazet 	 */
447541c6d650SReshetova, Elena 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
44766e3e939fSJohannes Berg 		err = sock_queue_err_skb(sk, skb);
4477dd4f1072SEric Dumazet 		sock_put(sk);
4478dd4f1072SEric Dumazet 	}
44796e3e939fSJohannes Berg 	if (err)
44806e3e939fSJohannes Berg 		kfree_skb(skb);
44816e3e939fSJohannes Berg }
44826e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
44836e3e939fSJohannes Berg 
4484f35d9d8aSRusty Russell /**
4485f35d9d8aSRusty Russell  * skb_partial_csum_set - set up and verify partial csum values for packet
4486f35d9d8aSRusty Russell  * @skb: the skb to set
4487f35d9d8aSRusty Russell  * @start: the number of bytes after skb->data to start checksumming.
4488f35d9d8aSRusty Russell  * @off: the offset from start to place the checksum.
4489f35d9d8aSRusty Russell  *
4490f35d9d8aSRusty Russell  * For untrusted partially-checksummed packets, we need to make sure the values
4491f35d9d8aSRusty Russell  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4492f35d9d8aSRusty Russell  *
4493f35d9d8aSRusty Russell  * This function checks and sets those values and skb->ip_summed: if this
4494f35d9d8aSRusty Russell  * returns false you should drop the packet.
4495f35d9d8aSRusty Russell  */
4496f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4497f35d9d8aSRusty Russell {
449852b5d6f5SEric Dumazet 	u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
449952b5d6f5SEric Dumazet 	u32 csum_start = skb_headroom(skb) + (u32)start;
450052b5d6f5SEric Dumazet 
450152b5d6f5SEric Dumazet 	if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
450252b5d6f5SEric Dumazet 		net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
450352b5d6f5SEric Dumazet 				     start, off, skb_headroom(skb), skb_headlen(skb));
4504f35d9d8aSRusty Russell 		return false;
4505f35d9d8aSRusty Russell 	}
4506f35d9d8aSRusty Russell 	skb->ip_summed = CHECKSUM_PARTIAL;
450752b5d6f5SEric Dumazet 	skb->csum_start = csum_start;
4508f35d9d8aSRusty Russell 	skb->csum_offset = off;
4509e5d5decaSJason Wang 	skb_set_transport_header(skb, start);
4510f35d9d8aSRusty Russell 	return true;
4511f35d9d8aSRusty Russell }
4512b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set);
4513f35d9d8aSRusty Russell 
4514ed1f50c3SPaul Durrant static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4515ed1f50c3SPaul Durrant 			       unsigned int max)
4516ed1f50c3SPaul Durrant {
4517ed1f50c3SPaul Durrant 	if (skb_headlen(skb) >= len)
4518ed1f50c3SPaul Durrant 		return 0;
4519ed1f50c3SPaul Durrant 
4520ed1f50c3SPaul Durrant 	/* If we need to pullup then pullup to the max, so we
4521ed1f50c3SPaul Durrant 	 * won't need to do it again.
4522ed1f50c3SPaul Durrant 	 */
4523ed1f50c3SPaul Durrant 	if (max > skb->len)
4524ed1f50c3SPaul Durrant 		max = skb->len;
4525ed1f50c3SPaul Durrant 
4526ed1f50c3SPaul Durrant 	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4527ed1f50c3SPaul Durrant 		return -ENOMEM;
4528ed1f50c3SPaul Durrant 
4529ed1f50c3SPaul Durrant 	if (skb_headlen(skb) < len)
4530ed1f50c3SPaul Durrant 		return -EPROTO;
4531ed1f50c3SPaul Durrant 
4532ed1f50c3SPaul Durrant 	return 0;
4533ed1f50c3SPaul Durrant }
4534ed1f50c3SPaul Durrant 
4535f9708b43SJan Beulich #define MAX_TCP_HDR_LEN (15 * 4)
4536f9708b43SJan Beulich 
4537f9708b43SJan Beulich static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4538f9708b43SJan Beulich 				      typeof(IPPROTO_IP) proto,
4539f9708b43SJan Beulich 				      unsigned int off)
4540f9708b43SJan Beulich {
4541f9708b43SJan Beulich 	switch (proto) {
4542f9708b43SJan Beulich 		int err;
4543f9708b43SJan Beulich 
4544f9708b43SJan Beulich 	case IPPROTO_TCP:
4545f9708b43SJan Beulich 		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4546f9708b43SJan Beulich 					  off + MAX_TCP_HDR_LEN);
4547f9708b43SJan Beulich 		if (!err && !skb_partial_csum_set(skb, off,
4548f9708b43SJan Beulich 						  offsetof(struct tcphdr,
4549f9708b43SJan Beulich 							   check)))
4550f9708b43SJan Beulich 			err = -EPROTO;
4551f9708b43SJan Beulich 		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4552f9708b43SJan Beulich 
4553f9708b43SJan Beulich 	case IPPROTO_UDP:
4554f9708b43SJan Beulich 		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4555f9708b43SJan Beulich 					  off + sizeof(struct udphdr));
4556f9708b43SJan Beulich 		if (!err && !skb_partial_csum_set(skb, off,
4557f9708b43SJan Beulich 						  offsetof(struct udphdr,
4558f9708b43SJan Beulich 							   check)))
4559f9708b43SJan Beulich 			err = -EPROTO;
4560f9708b43SJan Beulich 		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4561f9708b43SJan Beulich 	}
4562f9708b43SJan Beulich 
4563f9708b43SJan Beulich 	return ERR_PTR(-EPROTO);
4564f9708b43SJan Beulich }
4565f9708b43SJan Beulich 
4566ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus
4567ed1f50c3SPaul Durrant  * maximally sized IP and TCP or UDP headers.
4568ed1f50c3SPaul Durrant  */
4569ed1f50c3SPaul Durrant #define MAX_IP_HDR_LEN 128
4570ed1f50c3SPaul Durrant 
4571f9708b43SJan Beulich static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4572ed1f50c3SPaul Durrant {
4573ed1f50c3SPaul Durrant 	unsigned int off;
4574ed1f50c3SPaul Durrant 	bool fragment;
4575f9708b43SJan Beulich 	__sum16 *csum;
4576ed1f50c3SPaul Durrant 	int err;
4577ed1f50c3SPaul Durrant 
4578ed1f50c3SPaul Durrant 	fragment = false;
4579ed1f50c3SPaul Durrant 
4580ed1f50c3SPaul Durrant 	err = skb_maybe_pull_tail(skb,
4581ed1f50c3SPaul Durrant 				  sizeof(struct iphdr),
4582ed1f50c3SPaul Durrant 				  MAX_IP_HDR_LEN);
4583ed1f50c3SPaul Durrant 	if (err < 0)
4584ed1f50c3SPaul Durrant 		goto out;
4585ed1f50c3SPaul Durrant 
4586ed1f50c3SPaul Durrant 	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4587ed1f50c3SPaul Durrant 		fragment = true;
4588ed1f50c3SPaul Durrant 
4589ed1f50c3SPaul Durrant 	off = ip_hdrlen(skb);
4590ed1f50c3SPaul Durrant 
4591ed1f50c3SPaul Durrant 	err = -EPROTO;
4592ed1f50c3SPaul Durrant 
4593ed1f50c3SPaul Durrant 	if (fragment)
4594ed1f50c3SPaul Durrant 		goto out;
4595ed1f50c3SPaul Durrant 
4596f9708b43SJan Beulich 	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4597f9708b43SJan Beulich 	if (IS_ERR(csum))
4598f9708b43SJan Beulich 		return PTR_ERR(csum);
4599ed1f50c3SPaul Durrant 
4600ed1f50c3SPaul Durrant 	if (recalculate)
4601f9708b43SJan Beulich 		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4602ed1f50c3SPaul Durrant 					   ip_hdr(skb)->daddr,
4603ed1f50c3SPaul Durrant 					   skb->len - off,
4604f9708b43SJan Beulich 					   ip_hdr(skb)->protocol, 0);
4605ed1f50c3SPaul Durrant 	err = 0;
4606ed1f50c3SPaul Durrant 
4607ed1f50c3SPaul Durrant out:
4608ed1f50c3SPaul Durrant 	return err;
4609ed1f50c3SPaul Durrant }
4610ed1f50c3SPaul Durrant 
4611ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus
4612ed1f50c3SPaul Durrant  * an IPv6 header, all options, and a maximal TCP or UDP header.
4613ed1f50c3SPaul Durrant  */
4614ed1f50c3SPaul Durrant #define MAX_IPV6_HDR_LEN 256
4615ed1f50c3SPaul Durrant 
4616ed1f50c3SPaul Durrant #define OPT_HDR(type, skb, off) \
4617ed1f50c3SPaul Durrant 	(type *)(skb_network_header(skb) + (off))
4618ed1f50c3SPaul Durrant 
4619ed1f50c3SPaul Durrant static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4620ed1f50c3SPaul Durrant {
4621ed1f50c3SPaul Durrant 	int err;
4622ed1f50c3SPaul Durrant 	u8 nexthdr;
4623ed1f50c3SPaul Durrant 	unsigned int off;
4624ed1f50c3SPaul Durrant 	unsigned int len;
4625ed1f50c3SPaul Durrant 	bool fragment;
4626ed1f50c3SPaul Durrant 	bool done;
4627f9708b43SJan Beulich 	__sum16 *csum;
4628ed1f50c3SPaul Durrant 
4629ed1f50c3SPaul Durrant 	fragment = false;
4630ed1f50c3SPaul Durrant 	done = false;
4631ed1f50c3SPaul Durrant 
4632ed1f50c3SPaul Durrant 	off = sizeof(struct ipv6hdr);
4633ed1f50c3SPaul Durrant 
4634ed1f50c3SPaul Durrant 	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4635ed1f50c3SPaul Durrant 	if (err < 0)
4636ed1f50c3SPaul Durrant 		goto out;
4637ed1f50c3SPaul Durrant 
4638ed1f50c3SPaul Durrant 	nexthdr = ipv6_hdr(skb)->nexthdr;
4639ed1f50c3SPaul Durrant 
4640ed1f50c3SPaul Durrant 	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4641ed1f50c3SPaul Durrant 	while (off <= len && !done) {
4642ed1f50c3SPaul Durrant 		switch (nexthdr) {
4643ed1f50c3SPaul Durrant 		case IPPROTO_DSTOPTS:
4644ed1f50c3SPaul Durrant 		case IPPROTO_HOPOPTS:
4645ed1f50c3SPaul Durrant 		case IPPROTO_ROUTING: {
4646ed1f50c3SPaul Durrant 			struct ipv6_opt_hdr *hp;
4647ed1f50c3SPaul Durrant 
4648ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
4649ed1f50c3SPaul Durrant 						  off +
4650ed1f50c3SPaul Durrant 						  sizeof(struct ipv6_opt_hdr),
4651ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
4652ed1f50c3SPaul Durrant 			if (err < 0)
4653ed1f50c3SPaul Durrant 				goto out;
4654ed1f50c3SPaul Durrant 
4655ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4656ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
4657ed1f50c3SPaul Durrant 			off += ipv6_optlen(hp);
4658ed1f50c3SPaul Durrant 			break;
4659ed1f50c3SPaul Durrant 		}
4660ed1f50c3SPaul Durrant 		case IPPROTO_AH: {
4661ed1f50c3SPaul Durrant 			struct ip_auth_hdr *hp;
4662ed1f50c3SPaul Durrant 
4663ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
4664ed1f50c3SPaul Durrant 						  off +
4665ed1f50c3SPaul Durrant 						  sizeof(struct ip_auth_hdr),
4666ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
4667ed1f50c3SPaul Durrant 			if (err < 0)
4668ed1f50c3SPaul Durrant 				goto out;
4669ed1f50c3SPaul Durrant 
4670ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4671ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
4672ed1f50c3SPaul Durrant 			off += ipv6_authlen(hp);
4673ed1f50c3SPaul Durrant 			break;
4674ed1f50c3SPaul Durrant 		}
4675ed1f50c3SPaul Durrant 		case IPPROTO_FRAGMENT: {
4676ed1f50c3SPaul Durrant 			struct frag_hdr *hp;
4677ed1f50c3SPaul Durrant 
4678ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
4679ed1f50c3SPaul Durrant 						  off +
4680ed1f50c3SPaul Durrant 						  sizeof(struct frag_hdr),
4681ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
4682ed1f50c3SPaul Durrant 			if (err < 0)
4683ed1f50c3SPaul Durrant 				goto out;
4684ed1f50c3SPaul Durrant 
4685ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct frag_hdr, skb, off);
4686ed1f50c3SPaul Durrant 
4687ed1f50c3SPaul Durrant 			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4688ed1f50c3SPaul Durrant 				fragment = true;
4689ed1f50c3SPaul Durrant 
4690ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
4691ed1f50c3SPaul Durrant 			off += sizeof(struct frag_hdr);
4692ed1f50c3SPaul Durrant 			break;
4693ed1f50c3SPaul Durrant 		}
4694ed1f50c3SPaul Durrant 		default:
4695ed1f50c3SPaul Durrant 			done = true;
4696ed1f50c3SPaul Durrant 			break;
4697ed1f50c3SPaul Durrant 		}
4698ed1f50c3SPaul Durrant 	}
4699ed1f50c3SPaul Durrant 
4700ed1f50c3SPaul Durrant 	err = -EPROTO;
4701ed1f50c3SPaul Durrant 
4702ed1f50c3SPaul Durrant 	if (!done || fragment)
4703ed1f50c3SPaul Durrant 		goto out;
4704ed1f50c3SPaul Durrant 
4705f9708b43SJan Beulich 	csum = skb_checksum_setup_ip(skb, nexthdr, off);
4706f9708b43SJan Beulich 	if (IS_ERR(csum))
4707f9708b43SJan Beulich 		return PTR_ERR(csum);
4708ed1f50c3SPaul Durrant 
4709ed1f50c3SPaul Durrant 	if (recalculate)
4710f9708b43SJan Beulich 		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4711ed1f50c3SPaul Durrant 					 &ipv6_hdr(skb)->daddr,
4712f9708b43SJan Beulich 					 skb->len - off, nexthdr, 0);
4713ed1f50c3SPaul Durrant 	err = 0;
4714ed1f50c3SPaul Durrant 
4715ed1f50c3SPaul Durrant out:
4716ed1f50c3SPaul Durrant 	return err;
4717ed1f50c3SPaul Durrant }
4718ed1f50c3SPaul Durrant 
4719ed1f50c3SPaul Durrant /**
4720ed1f50c3SPaul Durrant  * skb_checksum_setup - set up partial checksum offset
4721ed1f50c3SPaul Durrant  * @skb: the skb to set up
4722ed1f50c3SPaul Durrant  * @recalculate: if true the pseudo-header checksum will be recalculated
4723ed1f50c3SPaul Durrant  */
4724ed1f50c3SPaul Durrant int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4725ed1f50c3SPaul Durrant {
4726ed1f50c3SPaul Durrant 	int err;
4727ed1f50c3SPaul Durrant 
4728ed1f50c3SPaul Durrant 	switch (skb->protocol) {
4729ed1f50c3SPaul Durrant 	case htons(ETH_P_IP):
4730f9708b43SJan Beulich 		err = skb_checksum_setup_ipv4(skb, recalculate);
4731ed1f50c3SPaul Durrant 		break;
4732ed1f50c3SPaul Durrant 
4733ed1f50c3SPaul Durrant 	case htons(ETH_P_IPV6):
4734ed1f50c3SPaul Durrant 		err = skb_checksum_setup_ipv6(skb, recalculate);
4735ed1f50c3SPaul Durrant 		break;
4736ed1f50c3SPaul Durrant 
4737ed1f50c3SPaul Durrant 	default:
4738ed1f50c3SPaul Durrant 		err = -EPROTO;
4739ed1f50c3SPaul Durrant 		break;
4740ed1f50c3SPaul Durrant 	}
4741ed1f50c3SPaul Durrant 
4742ed1f50c3SPaul Durrant 	return err;
4743ed1f50c3SPaul Durrant }
4744ed1f50c3SPaul Durrant EXPORT_SYMBOL(skb_checksum_setup);
4745ed1f50c3SPaul Durrant 
47469afd85c9SLinus Lüssing /**
47479afd85c9SLinus Lüssing  * skb_checksum_maybe_trim - maybe trims the given skb
47489afd85c9SLinus Lüssing  * @skb: the skb to check
47499afd85c9SLinus Lüssing  * @transport_len: the data length beyond the network header
47509afd85c9SLinus Lüssing  *
47519afd85c9SLinus Lüssing  * Checks whether the given skb has data beyond the given transport length.
47529afd85c9SLinus Lüssing  * If so, returns a cloned skb trimmed to this transport length.
47539afd85c9SLinus Lüssing  * Otherwise returns the provided skb. Returns NULL in error cases
47549afd85c9SLinus Lüssing  * (e.g. transport_len exceeds skb length or out-of-memory).
47559afd85c9SLinus Lüssing  *
4756a516993fSLinus Lüssing  * Caller needs to set the skb transport header and free any returned skb if it
4757a516993fSLinus Lüssing  * differs from the provided skb.
47589afd85c9SLinus Lüssing  */
47599afd85c9SLinus Lüssing static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
47609afd85c9SLinus Lüssing 					       unsigned int transport_len)
47619afd85c9SLinus Lüssing {
47629afd85c9SLinus Lüssing 	struct sk_buff *skb_chk;
47639afd85c9SLinus Lüssing 	unsigned int len = skb_transport_offset(skb) + transport_len;
47649afd85c9SLinus Lüssing 	int ret;
47659afd85c9SLinus Lüssing 
4766a516993fSLinus Lüssing 	if (skb->len < len)
47679afd85c9SLinus Lüssing 		return NULL;
4768a516993fSLinus Lüssing 	else if (skb->len == len)
47699afd85c9SLinus Lüssing 		return skb;
47709afd85c9SLinus Lüssing 
47719afd85c9SLinus Lüssing 	skb_chk = skb_clone(skb, GFP_ATOMIC);
47729afd85c9SLinus Lüssing 	if (!skb_chk)
47739afd85c9SLinus Lüssing 		return NULL;
47749afd85c9SLinus Lüssing 
47759afd85c9SLinus Lüssing 	ret = pskb_trim_rcsum(skb_chk, len);
47769afd85c9SLinus Lüssing 	if (ret) {
47779afd85c9SLinus Lüssing 		kfree_skb(skb_chk);
47789afd85c9SLinus Lüssing 		return NULL;
47799afd85c9SLinus Lüssing 	}
47809afd85c9SLinus Lüssing 
47819afd85c9SLinus Lüssing 	return skb_chk;
47829afd85c9SLinus Lüssing }
47839afd85c9SLinus Lüssing 
47849afd85c9SLinus Lüssing /**
47859afd85c9SLinus Lüssing  * skb_checksum_trimmed - validate checksum of an skb
47869afd85c9SLinus Lüssing  * @skb: the skb to check
47879afd85c9SLinus Lüssing  * @transport_len: the data length beyond the network header
47889afd85c9SLinus Lüssing  * @skb_chkf: checksum function to use
47899afd85c9SLinus Lüssing  *
47909afd85c9SLinus Lüssing  * Applies the given checksum function skb_chkf to the provided skb.
47919afd85c9SLinus Lüssing  * Returns a checked and maybe trimmed skb. Returns NULL on error.
47929afd85c9SLinus Lüssing  *
47939afd85c9SLinus Lüssing  * If the skb has data beyond the given transport length, then a
47949afd85c9SLinus Lüssing  * trimmed & cloned skb is checked and returned.
47959afd85c9SLinus Lüssing  *
4796a516993fSLinus Lüssing  * Caller needs to set the skb transport header and free any returned skb if it
4797a516993fSLinus Lüssing  * differs from the provided skb.
47989afd85c9SLinus Lüssing  */
47999afd85c9SLinus Lüssing struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
48009afd85c9SLinus Lüssing 				     unsigned int transport_len,
48019afd85c9SLinus Lüssing 				     __sum16(*skb_chkf)(struct sk_buff *skb))
48029afd85c9SLinus Lüssing {
48039afd85c9SLinus Lüssing 	struct sk_buff *skb_chk;
48049afd85c9SLinus Lüssing 	unsigned int offset = skb_transport_offset(skb);
4805fcba67c9SLinus Lüssing 	__sum16 ret;
48069afd85c9SLinus Lüssing 
48079afd85c9SLinus Lüssing 	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
48089afd85c9SLinus Lüssing 	if (!skb_chk)
4809a516993fSLinus Lüssing 		goto err;
48109afd85c9SLinus Lüssing 
4811a516993fSLinus Lüssing 	if (!pskb_may_pull(skb_chk, offset))
4812a516993fSLinus Lüssing 		goto err;
48139afd85c9SLinus Lüssing 
48149b368814SLinus Lüssing 	skb_pull_rcsum(skb_chk, offset);
48159afd85c9SLinus Lüssing 	ret = skb_chkf(skb_chk);
48169b368814SLinus Lüssing 	skb_push_rcsum(skb_chk, offset);
48179afd85c9SLinus Lüssing 
4818a516993fSLinus Lüssing 	if (ret)
4819a516993fSLinus Lüssing 		goto err;
48209afd85c9SLinus Lüssing 
48219afd85c9SLinus Lüssing 	return skb_chk;
4822a516993fSLinus Lüssing 
4823a516993fSLinus Lüssing err:
4824a516993fSLinus Lüssing 	if (skb_chk && skb_chk != skb)
4825a516993fSLinus Lüssing 		kfree_skb(skb_chk);
4826a516993fSLinus Lüssing 
4827a516993fSLinus Lüssing 	return NULL;
4828a516993fSLinus Lüssing 
48299afd85c9SLinus Lüssing }
48309afd85c9SLinus Lüssing EXPORT_SYMBOL(skb_checksum_trimmed);
48319afd85c9SLinus Lüssing 
48324497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb)
48334497b076SBen Hutchings {
4834e87cc472SJoe Perches 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4835e87cc472SJoe Perches 			     skb->dev->name);
48364497b076SBen Hutchings }
48374497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding);
4838bad43ca8SEric Dumazet 
4839bad43ca8SEric Dumazet void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4840bad43ca8SEric Dumazet {
48413d861f66SEric Dumazet 	if (head_stolen) {
48423d861f66SEric Dumazet 		skb_release_head_state(skb);
4843bad43ca8SEric Dumazet 		kmem_cache_free(skbuff_head_cache, skb);
48443d861f66SEric Dumazet 	} else {
4845bad43ca8SEric Dumazet 		__kfree_skb(skb);
4846bad43ca8SEric Dumazet 	}
48473d861f66SEric Dumazet }
4848bad43ca8SEric Dumazet EXPORT_SYMBOL(kfree_skb_partial);
4849bad43ca8SEric Dumazet 
4850bad43ca8SEric Dumazet /**
4851bad43ca8SEric Dumazet  * skb_try_coalesce - try to merge skb to prior one
4852bad43ca8SEric Dumazet  * @to: prior buffer
4853bad43ca8SEric Dumazet  * @from: buffer to add
4854bad43ca8SEric Dumazet  * @fragstolen: pointer to boolean
4855c6c4b97cSRandy Dunlap  * @delta_truesize: how much more was allocated than was requested
4856bad43ca8SEric Dumazet  */
4857bad43ca8SEric Dumazet bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4858bad43ca8SEric Dumazet 		      bool *fragstolen, int *delta_truesize)
4859bad43ca8SEric Dumazet {
4860c818fa9eSEric Dumazet 	struct skb_shared_info *to_shinfo, *from_shinfo;
4861bad43ca8SEric Dumazet 	int i, delta, len = from->len;
4862bad43ca8SEric Dumazet 
4863bad43ca8SEric Dumazet 	*fragstolen = false;
4864bad43ca8SEric Dumazet 
4865bad43ca8SEric Dumazet 	if (skb_cloned(to))
4866bad43ca8SEric Dumazet 		return false;
4867bad43ca8SEric Dumazet 
4868bad43ca8SEric Dumazet 	if (len <= skb_tailroom(to)) {
4869e93a0435SEric Dumazet 		if (len)
4870bad43ca8SEric Dumazet 			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4871bad43ca8SEric Dumazet 		*delta_truesize = 0;
4872bad43ca8SEric Dumazet 		return true;
4873bad43ca8SEric Dumazet 	}
4874bad43ca8SEric Dumazet 
4875c818fa9eSEric Dumazet 	to_shinfo = skb_shinfo(to);
4876c818fa9eSEric Dumazet 	from_shinfo = skb_shinfo(from);
4877c818fa9eSEric Dumazet 	if (to_shinfo->frag_list || from_shinfo->frag_list)
4878bad43ca8SEric Dumazet 		return false;
48791f8b977aSWillem de Bruijn 	if (skb_zcopy(to) || skb_zcopy(from))
48801f8b977aSWillem de Bruijn 		return false;
4881bad43ca8SEric Dumazet 
4882bad43ca8SEric Dumazet 	if (skb_headlen(from) != 0) {
4883bad43ca8SEric Dumazet 		struct page *page;
4884bad43ca8SEric Dumazet 		unsigned int offset;
4885bad43ca8SEric Dumazet 
4886c818fa9eSEric Dumazet 		if (to_shinfo->nr_frags +
4887c818fa9eSEric Dumazet 		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
4888bad43ca8SEric Dumazet 			return false;
4889bad43ca8SEric Dumazet 
4890bad43ca8SEric Dumazet 		if (skb_head_is_locked(from))
4891bad43ca8SEric Dumazet 			return false;
4892bad43ca8SEric Dumazet 
4893bad43ca8SEric Dumazet 		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4894bad43ca8SEric Dumazet 
4895bad43ca8SEric Dumazet 		page = virt_to_head_page(from->head);
4896bad43ca8SEric Dumazet 		offset = from->data - (unsigned char *)page_address(page);
4897bad43ca8SEric Dumazet 
4898c818fa9eSEric Dumazet 		skb_fill_page_desc(to, to_shinfo->nr_frags,
4899bad43ca8SEric Dumazet 				   page, offset, skb_headlen(from));
4900bad43ca8SEric Dumazet 		*fragstolen = true;
4901bad43ca8SEric Dumazet 	} else {
4902c818fa9eSEric Dumazet 		if (to_shinfo->nr_frags +
4903c818fa9eSEric Dumazet 		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
4904bad43ca8SEric Dumazet 			return false;
4905bad43ca8SEric Dumazet 
4906f4b549a5SWeiping Pan 		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
4907bad43ca8SEric Dumazet 	}
4908bad43ca8SEric Dumazet 
4909bad43ca8SEric Dumazet 	WARN_ON_ONCE(delta < len);
4910bad43ca8SEric Dumazet 
4911c818fa9eSEric Dumazet 	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
4912c818fa9eSEric Dumazet 	       from_shinfo->frags,
4913c818fa9eSEric Dumazet 	       from_shinfo->nr_frags * sizeof(skb_frag_t));
4914c818fa9eSEric Dumazet 	to_shinfo->nr_frags += from_shinfo->nr_frags;
4915bad43ca8SEric Dumazet 
4916bad43ca8SEric Dumazet 	if (!skb_cloned(from))
4917c818fa9eSEric Dumazet 		from_shinfo->nr_frags = 0;
4918bad43ca8SEric Dumazet 
49198ea853fdSLi RongQing 	/* if the skb is not cloned this does nothing
49208ea853fdSLi RongQing 	 * since we set nr_frags to 0.
49218ea853fdSLi RongQing 	 */
4922c818fa9eSEric Dumazet 	for (i = 0; i < from_shinfo->nr_frags; i++)
4923c818fa9eSEric Dumazet 		__skb_frag_ref(&from_shinfo->frags[i]);
4924bad43ca8SEric Dumazet 
4925bad43ca8SEric Dumazet 	to->truesize += delta;
4926bad43ca8SEric Dumazet 	to->len += len;
4927bad43ca8SEric Dumazet 	to->data_len += len;
4928bad43ca8SEric Dumazet 
4929bad43ca8SEric Dumazet 	*delta_truesize = delta;
4930bad43ca8SEric Dumazet 	return true;
4931bad43ca8SEric Dumazet }
4932bad43ca8SEric Dumazet EXPORT_SYMBOL(skb_try_coalesce);
4933621e84d6SNicolas Dichtel 
4934621e84d6SNicolas Dichtel /**
49358b27f277SNicolas Dichtel  * skb_scrub_packet - scrub an skb
4936621e84d6SNicolas Dichtel  *
4937621e84d6SNicolas Dichtel  * @skb: buffer to clean
49388b27f277SNicolas Dichtel  * @xnet: packet is crossing netns
4939621e84d6SNicolas Dichtel  *
49408b27f277SNicolas Dichtel  * skb_scrub_packet can be used after encapsulating or decapsulting a packet
49418b27f277SNicolas Dichtel  * into/from a tunnel. Some information have to be cleared during these
49428b27f277SNicolas Dichtel  * operations.
49438b27f277SNicolas Dichtel  * skb_scrub_packet can also be used to clean a skb before injecting it in
49448b27f277SNicolas Dichtel  * another namespace (@xnet == true). We have to clear all information in the
49458b27f277SNicolas Dichtel  * skb that could impact namespace isolation.
4946621e84d6SNicolas Dichtel  */
49478b27f277SNicolas Dichtel void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4948621e84d6SNicolas Dichtel {
4949621e84d6SNicolas Dichtel 	skb->pkt_type = PACKET_HOST;
4950621e84d6SNicolas Dichtel 	skb->skb_iif = 0;
495160ff7467SWANG Cong 	skb->ignore_df = 0;
4952621e84d6SNicolas Dichtel 	skb_dst_drop(skb);
4953621e84d6SNicolas Dichtel 	secpath_reset(skb);
4954621e84d6SNicolas Dichtel 	nf_reset(skb);
4955621e84d6SNicolas Dichtel 	nf_reset_trace(skb);
4956213dd74aSHerbert Xu 
49576f9a5069SPetr Machata #ifdef CONFIG_NET_SWITCHDEV
49586f9a5069SPetr Machata 	skb->offload_fwd_mark = 0;
4959875e8939SIdo Schimmel 	skb->offload_l3_fwd_mark = 0;
49606f9a5069SPetr Machata #endif
49616f9a5069SPetr Machata 
4962213dd74aSHerbert Xu 	if (!xnet)
4963213dd74aSHerbert Xu 		return;
4964213dd74aSHerbert Xu 
49652b5ec1a5SYe Yin 	ipvs_reset(skb);
4966213dd74aSHerbert Xu 	skb->mark = 0;
4967c47d8c2fSJesus Sanchez-Palencia 	skb->tstamp = 0;
4968621e84d6SNicolas Dichtel }
4969621e84d6SNicolas Dichtel EXPORT_SYMBOL_GPL(skb_scrub_packet);
4970de960aa9SFlorian Westphal 
4971de960aa9SFlorian Westphal /**
4972de960aa9SFlorian Westphal  * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4973de960aa9SFlorian Westphal  *
4974de960aa9SFlorian Westphal  * @skb: GSO skb
4975de960aa9SFlorian Westphal  *
4976de960aa9SFlorian Westphal  * skb_gso_transport_seglen is used to determine the real size of the
4977de960aa9SFlorian Westphal  * individual segments, including Layer4 headers (TCP/UDP).
4978de960aa9SFlorian Westphal  *
4979de960aa9SFlorian Westphal  * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4980de960aa9SFlorian Westphal  */
4981a4a77718SDaniel Axtens static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4982de960aa9SFlorian Westphal {
4983de960aa9SFlorian Westphal 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
4984f993bc25SFlorian Westphal 	unsigned int thlen = 0;
4985f993bc25SFlorian Westphal 
4986f993bc25SFlorian Westphal 	if (skb->encapsulation) {
4987f993bc25SFlorian Westphal 		thlen = skb_inner_transport_header(skb) -
4988f993bc25SFlorian Westphal 			skb_transport_header(skb);
4989de960aa9SFlorian Westphal 
4990de960aa9SFlorian Westphal 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4991f993bc25SFlorian Westphal 			thlen += inner_tcp_hdrlen(skb);
4992f993bc25SFlorian Westphal 	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4993f993bc25SFlorian Westphal 		thlen = tcp_hdrlen(skb);
49941dd27cdeSDaniel Axtens 	} else if (unlikely(skb_is_gso_sctp(skb))) {
499590017accSMarcelo Ricardo Leitner 		thlen = sizeof(struct sctphdr);
4996ee80d1ebSWillem de Bruijn 	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
4997ee80d1ebSWillem de Bruijn 		thlen = sizeof(struct udphdr);
4998f993bc25SFlorian Westphal 	}
49996d39d589SFlorian Westphal 	/* UFO sets gso_size to the size of the fragmentation
50006d39d589SFlorian Westphal 	 * payload, i.e. the size of the L4 (UDP) header is already
50016d39d589SFlorian Westphal 	 * accounted for.
50026d39d589SFlorian Westphal 	 */
5003f993bc25SFlorian Westphal 	return thlen + shinfo->gso_size;
5004de960aa9SFlorian Westphal }
5005a4a77718SDaniel Axtens 
5006a4a77718SDaniel Axtens /**
5007a4a77718SDaniel Axtens  * skb_gso_network_seglen - Return length of individual segments of a gso packet
5008a4a77718SDaniel Axtens  *
5009a4a77718SDaniel Axtens  * @skb: GSO skb
5010a4a77718SDaniel Axtens  *
5011a4a77718SDaniel Axtens  * skb_gso_network_seglen is used to determine the real size of the
5012a4a77718SDaniel Axtens  * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
5013a4a77718SDaniel Axtens  *
5014a4a77718SDaniel Axtens  * The MAC/L2 header is not accounted for.
5015a4a77718SDaniel Axtens  */
5016a4a77718SDaniel Axtens static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
5017a4a77718SDaniel Axtens {
5018a4a77718SDaniel Axtens 	unsigned int hdr_len = skb_transport_header(skb) -
5019a4a77718SDaniel Axtens 			       skb_network_header(skb);
5020a4a77718SDaniel Axtens 
5021a4a77718SDaniel Axtens 	return hdr_len + skb_gso_transport_seglen(skb);
5022a4a77718SDaniel Axtens }
5023a4a77718SDaniel Axtens 
5024a4a77718SDaniel Axtens /**
5025a4a77718SDaniel Axtens  * skb_gso_mac_seglen - Return length of individual segments of a gso packet
5026a4a77718SDaniel Axtens  *
5027a4a77718SDaniel Axtens  * @skb: GSO skb
5028a4a77718SDaniel Axtens  *
5029a4a77718SDaniel Axtens  * skb_gso_mac_seglen is used to determine the real size of the
5030a4a77718SDaniel Axtens  * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
5031a4a77718SDaniel Axtens  * headers (TCP/UDP).
5032a4a77718SDaniel Axtens  */
5033a4a77718SDaniel Axtens static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5034a4a77718SDaniel Axtens {
5035a4a77718SDaniel Axtens 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5036a4a77718SDaniel Axtens 
5037a4a77718SDaniel Axtens 	return hdr_len + skb_gso_transport_seglen(skb);
5038a4a77718SDaniel Axtens }
50390d5501c1SVlad Yasevich 
5040ae7ef81eSMarcelo Ricardo Leitner /**
50412b16f048SDaniel Axtens  * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
50422b16f048SDaniel Axtens  *
50432b16f048SDaniel Axtens  * There are a couple of instances where we have a GSO skb, and we
50442b16f048SDaniel Axtens  * want to determine what size it would be after it is segmented.
50452b16f048SDaniel Axtens  *
50462b16f048SDaniel Axtens  * We might want to check:
50472b16f048SDaniel Axtens  * -    L3+L4+payload size (e.g. IP forwarding)
50482b16f048SDaniel Axtens  * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
50492b16f048SDaniel Axtens  *
50502b16f048SDaniel Axtens  * This is a helper to do that correctly considering GSO_BY_FRAGS.
50512b16f048SDaniel Axtens  *
505249682bfaSMathieu Malaterre  * @skb: GSO skb
505349682bfaSMathieu Malaterre  *
50542b16f048SDaniel Axtens  * @seg_len: The segmented length (from skb_gso_*_seglen). In the
50552b16f048SDaniel Axtens  *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
50562b16f048SDaniel Axtens  *
50572b16f048SDaniel Axtens  * @max_len: The maximum permissible length.
50582b16f048SDaniel Axtens  *
50592b16f048SDaniel Axtens  * Returns true if the segmented length <= max length.
50602b16f048SDaniel Axtens  */
50612b16f048SDaniel Axtens static inline bool skb_gso_size_check(const struct sk_buff *skb,
50622b16f048SDaniel Axtens 				      unsigned int seg_len,
50632b16f048SDaniel Axtens 				      unsigned int max_len) {
50642b16f048SDaniel Axtens 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
50652b16f048SDaniel Axtens 	const struct sk_buff *iter;
50662b16f048SDaniel Axtens 
50672b16f048SDaniel Axtens 	if (shinfo->gso_size != GSO_BY_FRAGS)
50682b16f048SDaniel Axtens 		return seg_len <= max_len;
50692b16f048SDaniel Axtens 
50702b16f048SDaniel Axtens 	/* Undo this so we can re-use header sizes */
50712b16f048SDaniel Axtens 	seg_len -= GSO_BY_FRAGS;
50722b16f048SDaniel Axtens 
50732b16f048SDaniel Axtens 	skb_walk_frags(skb, iter) {
50742b16f048SDaniel Axtens 		if (seg_len + skb_headlen(iter) > max_len)
50752b16f048SDaniel Axtens 			return false;
50762b16f048SDaniel Axtens 	}
50772b16f048SDaniel Axtens 
50782b16f048SDaniel Axtens 	return true;
50792b16f048SDaniel Axtens }
50802b16f048SDaniel Axtens 
50812b16f048SDaniel Axtens /**
5082779b7931SDaniel Axtens  * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5083ae7ef81eSMarcelo Ricardo Leitner  *
5084ae7ef81eSMarcelo Ricardo Leitner  * @skb: GSO skb
508576f21b99SDavid S. Miller  * @mtu: MTU to validate against
5086ae7ef81eSMarcelo Ricardo Leitner  *
5087779b7931SDaniel Axtens  * skb_gso_validate_network_len validates if a given skb will fit a
5088779b7931SDaniel Axtens  * wanted MTU once split. It considers L3 headers, L4 headers, and the
5089779b7931SDaniel Axtens  * payload.
5090ae7ef81eSMarcelo Ricardo Leitner  */
5091779b7931SDaniel Axtens bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5092ae7ef81eSMarcelo Ricardo Leitner {
50932b16f048SDaniel Axtens 	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5094ae7ef81eSMarcelo Ricardo Leitner }
5095779b7931SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
5096ae7ef81eSMarcelo Ricardo Leitner 
50972b16f048SDaniel Axtens /**
50982b16f048SDaniel Axtens  * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
50992b16f048SDaniel Axtens  *
51002b16f048SDaniel Axtens  * @skb: GSO skb
51012b16f048SDaniel Axtens  * @len: length to validate against
51022b16f048SDaniel Axtens  *
51032b16f048SDaniel Axtens  * skb_gso_validate_mac_len validates if a given skb will fit a wanted
51042b16f048SDaniel Axtens  * length once split, including L2, L3 and L4 headers and the payload.
51052b16f048SDaniel Axtens  */
51062b16f048SDaniel Axtens bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
51072b16f048SDaniel Axtens {
51082b16f048SDaniel Axtens 	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
51092b16f048SDaniel Axtens }
51102b16f048SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
51112b16f048SDaniel Axtens 
51120d5501c1SVlad Yasevich static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
51130d5501c1SVlad Yasevich {
5114d85e8be2SYuya Kusakabe 	int mac_len, meta_len;
5115d85e8be2SYuya Kusakabe 	void *meta;
51164bbb3e0eSToshiaki Makita 
51170d5501c1SVlad Yasevich 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
51180d5501c1SVlad Yasevich 		kfree_skb(skb);
51190d5501c1SVlad Yasevich 		return NULL;
51200d5501c1SVlad Yasevich 	}
51210d5501c1SVlad Yasevich 
51224bbb3e0eSToshiaki Makita 	mac_len = skb->data - skb_mac_header(skb);
5123ae474573SToshiaki Makita 	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
51244bbb3e0eSToshiaki Makita 		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
51254bbb3e0eSToshiaki Makita 			mac_len - VLAN_HLEN - ETH_TLEN);
5126ae474573SToshiaki Makita 	}
5127d85e8be2SYuya Kusakabe 
5128d85e8be2SYuya Kusakabe 	meta_len = skb_metadata_len(skb);
5129d85e8be2SYuya Kusakabe 	if (meta_len) {
5130d85e8be2SYuya Kusakabe 		meta = skb_metadata_end(skb) - meta_len;
5131d85e8be2SYuya Kusakabe 		memmove(meta + VLAN_HLEN, meta, meta_len);
5132d85e8be2SYuya Kusakabe 	}
5133d85e8be2SYuya Kusakabe 
51340d5501c1SVlad Yasevich 	skb->mac_header += VLAN_HLEN;
51350d5501c1SVlad Yasevich 	return skb;
51360d5501c1SVlad Yasevich }
51370d5501c1SVlad Yasevich 
51380d5501c1SVlad Yasevich struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
51390d5501c1SVlad Yasevich {
51400d5501c1SVlad Yasevich 	struct vlan_hdr *vhdr;
51410d5501c1SVlad Yasevich 	u16 vlan_tci;
51420d5501c1SVlad Yasevich 
5143df8a39deSJiri Pirko 	if (unlikely(skb_vlan_tag_present(skb))) {
51440d5501c1SVlad Yasevich 		/* vlan_tci is already set-up so leave this for another time */
51450d5501c1SVlad Yasevich 		return skb;
51460d5501c1SVlad Yasevich 	}
51470d5501c1SVlad Yasevich 
51480d5501c1SVlad Yasevich 	skb = skb_share_check(skb, GFP_ATOMIC);
51490d5501c1SVlad Yasevich 	if (unlikely(!skb))
51500d5501c1SVlad Yasevich 		goto err_free;
51510d5501c1SVlad Yasevich 
51520d5501c1SVlad Yasevich 	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
51530d5501c1SVlad Yasevich 		goto err_free;
51540d5501c1SVlad Yasevich 
51550d5501c1SVlad Yasevich 	vhdr = (struct vlan_hdr *)skb->data;
51560d5501c1SVlad Yasevich 	vlan_tci = ntohs(vhdr->h_vlan_TCI);
51570d5501c1SVlad Yasevich 	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
51580d5501c1SVlad Yasevich 
51590d5501c1SVlad Yasevich 	skb_pull_rcsum(skb, VLAN_HLEN);
51600d5501c1SVlad Yasevich 	vlan_set_encap_proto(skb, vhdr);
51610d5501c1SVlad Yasevich 
51620d5501c1SVlad Yasevich 	skb = skb_reorder_vlan_header(skb);
51630d5501c1SVlad Yasevich 	if (unlikely(!skb))
51640d5501c1SVlad Yasevich 		goto err_free;
51650d5501c1SVlad Yasevich 
51660d5501c1SVlad Yasevich 	skb_reset_network_header(skb);
51670d5501c1SVlad Yasevich 	skb_reset_transport_header(skb);
51680d5501c1SVlad Yasevich 	skb_reset_mac_len(skb);
51690d5501c1SVlad Yasevich 
51700d5501c1SVlad Yasevich 	return skb;
51710d5501c1SVlad Yasevich 
51720d5501c1SVlad Yasevich err_free:
51730d5501c1SVlad Yasevich 	kfree_skb(skb);
51740d5501c1SVlad Yasevich 	return NULL;
51750d5501c1SVlad Yasevich }
51760d5501c1SVlad Yasevich EXPORT_SYMBOL(skb_vlan_untag);
51772e4e4410SEric Dumazet 
5178e2195121SJiri Pirko int skb_ensure_writable(struct sk_buff *skb, int write_len)
5179e2195121SJiri Pirko {
5180e2195121SJiri Pirko 	if (!pskb_may_pull(skb, write_len))
5181e2195121SJiri Pirko 		return -ENOMEM;
5182e2195121SJiri Pirko 
5183e2195121SJiri Pirko 	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5184e2195121SJiri Pirko 		return 0;
5185e2195121SJiri Pirko 
5186e2195121SJiri Pirko 	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5187e2195121SJiri Pirko }
5188e2195121SJiri Pirko EXPORT_SYMBOL(skb_ensure_writable);
5189e2195121SJiri Pirko 
5190bfca4c52SShmulik Ladkani /* remove VLAN header from packet and update csum accordingly.
5191bfca4c52SShmulik Ladkani  * expects a non skb_vlan_tag_present skb with a vlan tag payload
5192bfca4c52SShmulik Ladkani  */
5193bfca4c52SShmulik Ladkani int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
519493515d53SJiri Pirko {
519593515d53SJiri Pirko 	struct vlan_hdr *vhdr;
5196b6a79208SShmulik Ladkani 	int offset = skb->data - skb_mac_header(skb);
519793515d53SJiri Pirko 	int err;
519893515d53SJiri Pirko 
5199b6a79208SShmulik Ladkani 	if (WARN_ONCE(offset,
5200b6a79208SShmulik Ladkani 		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5201b6a79208SShmulik Ladkani 		      offset)) {
5202b6a79208SShmulik Ladkani 		return -EINVAL;
5203b6a79208SShmulik Ladkani 	}
5204b6a79208SShmulik Ladkani 
520593515d53SJiri Pirko 	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
520693515d53SJiri Pirko 	if (unlikely(err))
5207b6a79208SShmulik Ladkani 		return err;
520893515d53SJiri Pirko 
520993515d53SJiri Pirko 	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
521093515d53SJiri Pirko 
521193515d53SJiri Pirko 	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
521293515d53SJiri Pirko 	*vlan_tci = ntohs(vhdr->h_vlan_TCI);
521393515d53SJiri Pirko 
521493515d53SJiri Pirko 	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
521593515d53SJiri Pirko 	__skb_pull(skb, VLAN_HLEN);
521693515d53SJiri Pirko 
521793515d53SJiri Pirko 	vlan_set_encap_proto(skb, vhdr);
521893515d53SJiri Pirko 	skb->mac_header += VLAN_HLEN;
521993515d53SJiri Pirko 
522093515d53SJiri Pirko 	if (skb_network_offset(skb) < ETH_HLEN)
522193515d53SJiri Pirko 		skb_set_network_header(skb, ETH_HLEN);
522293515d53SJiri Pirko 
522393515d53SJiri Pirko 	skb_reset_mac_len(skb);
522493515d53SJiri Pirko 
522593515d53SJiri Pirko 	return err;
522693515d53SJiri Pirko }
5227bfca4c52SShmulik Ladkani EXPORT_SYMBOL(__skb_vlan_pop);
522893515d53SJiri Pirko 
5229b6a79208SShmulik Ladkani /* Pop a vlan tag either from hwaccel or from payload.
5230b6a79208SShmulik Ladkani  * Expects skb->data at mac header.
5231b6a79208SShmulik Ladkani  */
523293515d53SJiri Pirko int skb_vlan_pop(struct sk_buff *skb)
523393515d53SJiri Pirko {
523493515d53SJiri Pirko 	u16 vlan_tci;
523593515d53SJiri Pirko 	__be16 vlan_proto;
523693515d53SJiri Pirko 	int err;
523793515d53SJiri Pirko 
5238df8a39deSJiri Pirko 	if (likely(skb_vlan_tag_present(skb))) {
5239b1817524SMichał Mirosław 		__vlan_hwaccel_clear_tag(skb);
524093515d53SJiri Pirko 	} else {
5241ecf4ee41SShmulik Ladkani 		if (unlikely(!eth_type_vlan(skb->protocol)))
524293515d53SJiri Pirko 			return 0;
524393515d53SJiri Pirko 
524493515d53SJiri Pirko 		err = __skb_vlan_pop(skb, &vlan_tci);
524593515d53SJiri Pirko 		if (err)
524693515d53SJiri Pirko 			return err;
524793515d53SJiri Pirko 	}
524893515d53SJiri Pirko 	/* move next vlan tag to hw accel tag */
5249ecf4ee41SShmulik Ladkani 	if (likely(!eth_type_vlan(skb->protocol)))
525093515d53SJiri Pirko 		return 0;
525193515d53SJiri Pirko 
525293515d53SJiri Pirko 	vlan_proto = skb->protocol;
525393515d53SJiri Pirko 	err = __skb_vlan_pop(skb, &vlan_tci);
525493515d53SJiri Pirko 	if (unlikely(err))
525593515d53SJiri Pirko 		return err;
525693515d53SJiri Pirko 
525793515d53SJiri Pirko 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
525893515d53SJiri Pirko 	return 0;
525993515d53SJiri Pirko }
526093515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_pop);
526193515d53SJiri Pirko 
5262b6a79208SShmulik Ladkani /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5263b6a79208SShmulik Ladkani  * Expects skb->data at mac header.
5264b6a79208SShmulik Ladkani  */
526593515d53SJiri Pirko int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
526693515d53SJiri Pirko {
5267df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
5268b6a79208SShmulik Ladkani 		int offset = skb->data - skb_mac_header(skb);
526993515d53SJiri Pirko 		int err;
527093515d53SJiri Pirko 
5271b6a79208SShmulik Ladkani 		if (WARN_ONCE(offset,
5272b6a79208SShmulik Ladkani 			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5273b6a79208SShmulik Ladkani 			      offset)) {
5274b6a79208SShmulik Ladkani 			return -EINVAL;
5275b6a79208SShmulik Ladkani 		}
5276b6a79208SShmulik Ladkani 
527793515d53SJiri Pirko 		err = __vlan_insert_tag(skb, skb->vlan_proto,
5278df8a39deSJiri Pirko 					skb_vlan_tag_get(skb));
5279b6a79208SShmulik Ladkani 		if (err)
528093515d53SJiri Pirko 			return err;
52819241e2dfSDaniel Borkmann 
528293515d53SJiri Pirko 		skb->protocol = skb->vlan_proto;
528393515d53SJiri Pirko 		skb->mac_len += VLAN_HLEN;
528493515d53SJiri Pirko 
52856b83d28aSDaniel Borkmann 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
528693515d53SJiri Pirko 	}
528793515d53SJiri Pirko 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
528893515d53SJiri Pirko 	return 0;
528993515d53SJiri Pirko }
529093515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_push);
529193515d53SJiri Pirko 
52922e4e4410SEric Dumazet /**
52932e4e4410SEric Dumazet  * alloc_skb_with_frags - allocate skb with page frags
52942e4e4410SEric Dumazet  *
5295de3f0d0eSMasanari Iida  * @header_len: size of linear part
5296de3f0d0eSMasanari Iida  * @data_len: needed length in frags
5297de3f0d0eSMasanari Iida  * @max_page_order: max page order desired.
5298de3f0d0eSMasanari Iida  * @errcode: pointer to error code if any
5299de3f0d0eSMasanari Iida  * @gfp_mask: allocation mask
53002e4e4410SEric Dumazet  *
53012e4e4410SEric Dumazet  * This can be used to allocate a paged skb, given a maximal order for frags.
53022e4e4410SEric Dumazet  */
53032e4e4410SEric Dumazet struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
53042e4e4410SEric Dumazet 				     unsigned long data_len,
53052e4e4410SEric Dumazet 				     int max_page_order,
53062e4e4410SEric Dumazet 				     int *errcode,
53072e4e4410SEric Dumazet 				     gfp_t gfp_mask)
53082e4e4410SEric Dumazet {
53092e4e4410SEric Dumazet 	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
53102e4e4410SEric Dumazet 	unsigned long chunk;
53112e4e4410SEric Dumazet 	struct sk_buff *skb;
53122e4e4410SEric Dumazet 	struct page *page;
53132e4e4410SEric Dumazet 	int i;
53142e4e4410SEric Dumazet 
53152e4e4410SEric Dumazet 	*errcode = -EMSGSIZE;
53162e4e4410SEric Dumazet 	/* Note this test could be relaxed, if we succeed to allocate
53172e4e4410SEric Dumazet 	 * high order pages...
53182e4e4410SEric Dumazet 	 */
53192e4e4410SEric Dumazet 	if (npages > MAX_SKB_FRAGS)
53202e4e4410SEric Dumazet 		return NULL;
53212e4e4410SEric Dumazet 
53222e4e4410SEric Dumazet 	*errcode = -ENOBUFS;
5323f8c468e8SDavid Rientjes 	skb = alloc_skb(header_len, gfp_mask);
53242e4e4410SEric Dumazet 	if (!skb)
53252e4e4410SEric Dumazet 		return NULL;
53262e4e4410SEric Dumazet 
53272e4e4410SEric Dumazet 	skb->truesize += npages << PAGE_SHIFT;
53282e4e4410SEric Dumazet 
53292e4e4410SEric Dumazet 	for (i = 0; npages > 0; i++) {
53302e4e4410SEric Dumazet 		int order = max_page_order;
53312e4e4410SEric Dumazet 
53322e4e4410SEric Dumazet 		while (order) {
53332e4e4410SEric Dumazet 			if (npages >= 1 << order) {
5334d0164adcSMel Gorman 				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
53352e4e4410SEric Dumazet 						   __GFP_COMP |
5336d14b56f5SMichal Hocko 						   __GFP_NOWARN,
53372e4e4410SEric Dumazet 						   order);
53382e4e4410SEric Dumazet 				if (page)
53392e4e4410SEric Dumazet 					goto fill_page;
53402e4e4410SEric Dumazet 				/* Do not retry other high order allocations */
53412e4e4410SEric Dumazet 				order = 1;
53422e4e4410SEric Dumazet 				max_page_order = 0;
53432e4e4410SEric Dumazet 			}
53442e4e4410SEric Dumazet 			order--;
53452e4e4410SEric Dumazet 		}
53462e4e4410SEric Dumazet 		page = alloc_page(gfp_mask);
53472e4e4410SEric Dumazet 		if (!page)
53482e4e4410SEric Dumazet 			goto failure;
53492e4e4410SEric Dumazet fill_page:
53502e4e4410SEric Dumazet 		chunk = min_t(unsigned long, data_len,
53512e4e4410SEric Dumazet 			      PAGE_SIZE << order);
53522e4e4410SEric Dumazet 		skb_fill_page_desc(skb, i, page, 0, chunk);
53532e4e4410SEric Dumazet 		data_len -= chunk;
53542e4e4410SEric Dumazet 		npages -= 1 << order;
53552e4e4410SEric Dumazet 	}
53562e4e4410SEric Dumazet 	return skb;
53572e4e4410SEric Dumazet 
53582e4e4410SEric Dumazet failure:
53592e4e4410SEric Dumazet 	kfree_skb(skb);
53602e4e4410SEric Dumazet 	return NULL;
53612e4e4410SEric Dumazet }
53622e4e4410SEric Dumazet EXPORT_SYMBOL(alloc_skb_with_frags);
53636fa01ccdSSowmini Varadhan 
53646fa01ccdSSowmini Varadhan /* carve out the first off bytes from skb when off < headlen */
53656fa01ccdSSowmini Varadhan static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
53666fa01ccdSSowmini Varadhan 				    const int headlen, gfp_t gfp_mask)
53676fa01ccdSSowmini Varadhan {
53686fa01ccdSSowmini Varadhan 	int i;
53696fa01ccdSSowmini Varadhan 	int size = skb_end_offset(skb);
53706fa01ccdSSowmini Varadhan 	int new_hlen = headlen - off;
53716fa01ccdSSowmini Varadhan 	u8 *data;
53726fa01ccdSSowmini Varadhan 
53736fa01ccdSSowmini Varadhan 	size = SKB_DATA_ALIGN(size);
53746fa01ccdSSowmini Varadhan 
53756fa01ccdSSowmini Varadhan 	if (skb_pfmemalloc(skb))
53766fa01ccdSSowmini Varadhan 		gfp_mask |= __GFP_MEMALLOC;
53776fa01ccdSSowmini Varadhan 	data = kmalloc_reserve(size +
53786fa01ccdSSowmini Varadhan 			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
53796fa01ccdSSowmini Varadhan 			       gfp_mask, NUMA_NO_NODE, NULL);
53806fa01ccdSSowmini Varadhan 	if (!data)
53816fa01ccdSSowmini Varadhan 		return -ENOMEM;
53826fa01ccdSSowmini Varadhan 
53836fa01ccdSSowmini Varadhan 	size = SKB_WITH_OVERHEAD(ksize(data));
53846fa01ccdSSowmini Varadhan 
53856fa01ccdSSowmini Varadhan 	/* Copy real data, and all frags */
53866fa01ccdSSowmini Varadhan 	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
53876fa01ccdSSowmini Varadhan 	skb->len -= off;
53886fa01ccdSSowmini Varadhan 
53896fa01ccdSSowmini Varadhan 	memcpy((struct skb_shared_info *)(data + size),
53906fa01ccdSSowmini Varadhan 	       skb_shinfo(skb),
53916fa01ccdSSowmini Varadhan 	       offsetof(struct skb_shared_info,
53926fa01ccdSSowmini Varadhan 			frags[skb_shinfo(skb)->nr_frags]));
53936fa01ccdSSowmini Varadhan 	if (skb_cloned(skb)) {
53946fa01ccdSSowmini Varadhan 		/* drop the old head gracefully */
53956fa01ccdSSowmini Varadhan 		if (skb_orphan_frags(skb, gfp_mask)) {
53966fa01ccdSSowmini Varadhan 			kfree(data);
53976fa01ccdSSowmini Varadhan 			return -ENOMEM;
53986fa01ccdSSowmini Varadhan 		}
53996fa01ccdSSowmini Varadhan 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
54006fa01ccdSSowmini Varadhan 			skb_frag_ref(skb, i);
54016fa01ccdSSowmini Varadhan 		if (skb_has_frag_list(skb))
54026fa01ccdSSowmini Varadhan 			skb_clone_fraglist(skb);
54036fa01ccdSSowmini Varadhan 		skb_release_data(skb);
54046fa01ccdSSowmini Varadhan 	} else {
54056fa01ccdSSowmini Varadhan 		/* we can reuse existing recount- all we did was
54066fa01ccdSSowmini Varadhan 		 * relocate values
54076fa01ccdSSowmini Varadhan 		 */
54086fa01ccdSSowmini Varadhan 		skb_free_head(skb);
54096fa01ccdSSowmini Varadhan 	}
54106fa01ccdSSowmini Varadhan 
54116fa01ccdSSowmini Varadhan 	skb->head = data;
54126fa01ccdSSowmini Varadhan 	skb->data = data;
54136fa01ccdSSowmini Varadhan 	skb->head_frag = 0;
54146fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET
54156fa01ccdSSowmini Varadhan 	skb->end = size;
54166fa01ccdSSowmini Varadhan #else
54176fa01ccdSSowmini Varadhan 	skb->end = skb->head + size;
54186fa01ccdSSowmini Varadhan #endif
54196fa01ccdSSowmini Varadhan 	skb_set_tail_pointer(skb, skb_headlen(skb));
54206fa01ccdSSowmini Varadhan 	skb_headers_offset_update(skb, 0);
54216fa01ccdSSowmini Varadhan 	skb->cloned = 0;
54226fa01ccdSSowmini Varadhan 	skb->hdr_len = 0;
54236fa01ccdSSowmini Varadhan 	skb->nohdr = 0;
54246fa01ccdSSowmini Varadhan 	atomic_set(&skb_shinfo(skb)->dataref, 1);
54256fa01ccdSSowmini Varadhan 
54266fa01ccdSSowmini Varadhan 	return 0;
54276fa01ccdSSowmini Varadhan }
54286fa01ccdSSowmini Varadhan 
54296fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
54306fa01ccdSSowmini Varadhan 
54316fa01ccdSSowmini Varadhan /* carve out the first eat bytes from skb's frag_list. May recurse into
54326fa01ccdSSowmini Varadhan  * pskb_carve()
54336fa01ccdSSowmini Varadhan  */
54346fa01ccdSSowmini Varadhan static int pskb_carve_frag_list(struct sk_buff *skb,
54356fa01ccdSSowmini Varadhan 				struct skb_shared_info *shinfo, int eat,
54366fa01ccdSSowmini Varadhan 				gfp_t gfp_mask)
54376fa01ccdSSowmini Varadhan {
54386fa01ccdSSowmini Varadhan 	struct sk_buff *list = shinfo->frag_list;
54396fa01ccdSSowmini Varadhan 	struct sk_buff *clone = NULL;
54406fa01ccdSSowmini Varadhan 	struct sk_buff *insp = NULL;
54416fa01ccdSSowmini Varadhan 
54426fa01ccdSSowmini Varadhan 	do {
54436fa01ccdSSowmini Varadhan 		if (!list) {
54446fa01ccdSSowmini Varadhan 			pr_err("Not enough bytes to eat. Want %d\n", eat);
54456fa01ccdSSowmini Varadhan 			return -EFAULT;
54466fa01ccdSSowmini Varadhan 		}
54476fa01ccdSSowmini Varadhan 		if (list->len <= eat) {
54486fa01ccdSSowmini Varadhan 			/* Eaten as whole. */
54496fa01ccdSSowmini Varadhan 			eat -= list->len;
54506fa01ccdSSowmini Varadhan 			list = list->next;
54516fa01ccdSSowmini Varadhan 			insp = list;
54526fa01ccdSSowmini Varadhan 		} else {
54536fa01ccdSSowmini Varadhan 			/* Eaten partially. */
54546fa01ccdSSowmini Varadhan 			if (skb_shared(list)) {
54556fa01ccdSSowmini Varadhan 				clone = skb_clone(list, gfp_mask);
54566fa01ccdSSowmini Varadhan 				if (!clone)
54576fa01ccdSSowmini Varadhan 					return -ENOMEM;
54586fa01ccdSSowmini Varadhan 				insp = list->next;
54596fa01ccdSSowmini Varadhan 				list = clone;
54606fa01ccdSSowmini Varadhan 			} else {
54616fa01ccdSSowmini Varadhan 				/* This may be pulled without problems. */
54626fa01ccdSSowmini Varadhan 				insp = list;
54636fa01ccdSSowmini Varadhan 			}
54646fa01ccdSSowmini Varadhan 			if (pskb_carve(list, eat, gfp_mask) < 0) {
54656fa01ccdSSowmini Varadhan 				kfree_skb(clone);
54666fa01ccdSSowmini Varadhan 				return -ENOMEM;
54676fa01ccdSSowmini Varadhan 			}
54686fa01ccdSSowmini Varadhan 			break;
54696fa01ccdSSowmini Varadhan 		}
54706fa01ccdSSowmini Varadhan 	} while (eat);
54716fa01ccdSSowmini Varadhan 
54726fa01ccdSSowmini Varadhan 	/* Free pulled out fragments. */
54736fa01ccdSSowmini Varadhan 	while ((list = shinfo->frag_list) != insp) {
54746fa01ccdSSowmini Varadhan 		shinfo->frag_list = list->next;
54756fa01ccdSSowmini Varadhan 		kfree_skb(list);
54766fa01ccdSSowmini Varadhan 	}
54776fa01ccdSSowmini Varadhan 	/* And insert new clone at head. */
54786fa01ccdSSowmini Varadhan 	if (clone) {
54796fa01ccdSSowmini Varadhan 		clone->next = list;
54806fa01ccdSSowmini Varadhan 		shinfo->frag_list = clone;
54816fa01ccdSSowmini Varadhan 	}
54826fa01ccdSSowmini Varadhan 	return 0;
54836fa01ccdSSowmini Varadhan }
54846fa01ccdSSowmini Varadhan 
54856fa01ccdSSowmini Varadhan /* carve off first len bytes from skb. Split line (off) is in the
54866fa01ccdSSowmini Varadhan  * non-linear part of skb
54876fa01ccdSSowmini Varadhan  */
54886fa01ccdSSowmini Varadhan static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
54896fa01ccdSSowmini Varadhan 				       int pos, gfp_t gfp_mask)
54906fa01ccdSSowmini Varadhan {
54916fa01ccdSSowmini Varadhan 	int i, k = 0;
54926fa01ccdSSowmini Varadhan 	int size = skb_end_offset(skb);
54936fa01ccdSSowmini Varadhan 	u8 *data;
54946fa01ccdSSowmini Varadhan 	const int nfrags = skb_shinfo(skb)->nr_frags;
54956fa01ccdSSowmini Varadhan 	struct skb_shared_info *shinfo;
54966fa01ccdSSowmini Varadhan 
54976fa01ccdSSowmini Varadhan 	size = SKB_DATA_ALIGN(size);
54986fa01ccdSSowmini Varadhan 
54996fa01ccdSSowmini Varadhan 	if (skb_pfmemalloc(skb))
55006fa01ccdSSowmini Varadhan 		gfp_mask |= __GFP_MEMALLOC;
55016fa01ccdSSowmini Varadhan 	data = kmalloc_reserve(size +
55026fa01ccdSSowmini Varadhan 			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
55036fa01ccdSSowmini Varadhan 			       gfp_mask, NUMA_NO_NODE, NULL);
55046fa01ccdSSowmini Varadhan 	if (!data)
55056fa01ccdSSowmini Varadhan 		return -ENOMEM;
55066fa01ccdSSowmini Varadhan 
55076fa01ccdSSowmini Varadhan 	size = SKB_WITH_OVERHEAD(ksize(data));
55086fa01ccdSSowmini Varadhan 
55096fa01ccdSSowmini Varadhan 	memcpy((struct skb_shared_info *)(data + size),
55106fa01ccdSSowmini Varadhan 	       skb_shinfo(skb), offsetof(struct skb_shared_info,
55116fa01ccdSSowmini Varadhan 					 frags[skb_shinfo(skb)->nr_frags]));
55126fa01ccdSSowmini Varadhan 	if (skb_orphan_frags(skb, gfp_mask)) {
55136fa01ccdSSowmini Varadhan 		kfree(data);
55146fa01ccdSSowmini Varadhan 		return -ENOMEM;
55156fa01ccdSSowmini Varadhan 	}
55166fa01ccdSSowmini Varadhan 	shinfo = (struct skb_shared_info *)(data + size);
55176fa01ccdSSowmini Varadhan 	for (i = 0; i < nfrags; i++) {
55186fa01ccdSSowmini Varadhan 		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
55196fa01ccdSSowmini Varadhan 
55206fa01ccdSSowmini Varadhan 		if (pos + fsize > off) {
55216fa01ccdSSowmini Varadhan 			shinfo->frags[k] = skb_shinfo(skb)->frags[i];
55226fa01ccdSSowmini Varadhan 
55236fa01ccdSSowmini Varadhan 			if (pos < off) {
55246fa01ccdSSowmini Varadhan 				/* Split frag.
55256fa01ccdSSowmini Varadhan 				 * We have two variants in this case:
55266fa01ccdSSowmini Varadhan 				 * 1. Move all the frag to the second
55276fa01ccdSSowmini Varadhan 				 *    part, if it is possible. F.e.
55286fa01ccdSSowmini Varadhan 				 *    this approach is mandatory for TUX,
55296fa01ccdSSowmini Varadhan 				 *    where splitting is expensive.
55306fa01ccdSSowmini Varadhan 				 * 2. Split is accurately. We make this.
55316fa01ccdSSowmini Varadhan 				 */
55326fa01ccdSSowmini Varadhan 				shinfo->frags[0].page_offset += off - pos;
55336fa01ccdSSowmini Varadhan 				skb_frag_size_sub(&shinfo->frags[0], off - pos);
55346fa01ccdSSowmini Varadhan 			}
55356fa01ccdSSowmini Varadhan 			skb_frag_ref(skb, i);
55366fa01ccdSSowmini Varadhan 			k++;
55376fa01ccdSSowmini Varadhan 		}
55386fa01ccdSSowmini Varadhan 		pos += fsize;
55396fa01ccdSSowmini Varadhan 	}
55406fa01ccdSSowmini Varadhan 	shinfo->nr_frags = k;
55416fa01ccdSSowmini Varadhan 	if (skb_has_frag_list(skb))
55426fa01ccdSSowmini Varadhan 		skb_clone_fraglist(skb);
55436fa01ccdSSowmini Varadhan 
55446fa01ccdSSowmini Varadhan 	if (k == 0) {
55456fa01ccdSSowmini Varadhan 		/* split line is in frag list */
55466fa01ccdSSowmini Varadhan 		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
55476fa01ccdSSowmini Varadhan 	}
55486fa01ccdSSowmini Varadhan 	skb_release_data(skb);
55496fa01ccdSSowmini Varadhan 
55506fa01ccdSSowmini Varadhan 	skb->head = data;
55516fa01ccdSSowmini Varadhan 	skb->head_frag = 0;
55526fa01ccdSSowmini Varadhan 	skb->data = data;
55536fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET
55546fa01ccdSSowmini Varadhan 	skb->end = size;
55556fa01ccdSSowmini Varadhan #else
55566fa01ccdSSowmini Varadhan 	skb->end = skb->head + size;
55576fa01ccdSSowmini Varadhan #endif
55586fa01ccdSSowmini Varadhan 	skb_reset_tail_pointer(skb);
55596fa01ccdSSowmini Varadhan 	skb_headers_offset_update(skb, 0);
55606fa01ccdSSowmini Varadhan 	skb->cloned   = 0;
55616fa01ccdSSowmini Varadhan 	skb->hdr_len  = 0;
55626fa01ccdSSowmini Varadhan 	skb->nohdr    = 0;
55636fa01ccdSSowmini Varadhan 	skb->len -= off;
55646fa01ccdSSowmini Varadhan 	skb->data_len = skb->len;
55656fa01ccdSSowmini Varadhan 	atomic_set(&skb_shinfo(skb)->dataref, 1);
55666fa01ccdSSowmini Varadhan 	return 0;
55676fa01ccdSSowmini Varadhan }
55686fa01ccdSSowmini Varadhan 
55696fa01ccdSSowmini Varadhan /* remove len bytes from the beginning of the skb */
55706fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
55716fa01ccdSSowmini Varadhan {
55726fa01ccdSSowmini Varadhan 	int headlen = skb_headlen(skb);
55736fa01ccdSSowmini Varadhan 
55746fa01ccdSSowmini Varadhan 	if (len < headlen)
55756fa01ccdSSowmini Varadhan 		return pskb_carve_inside_header(skb, len, headlen, gfp);
55766fa01ccdSSowmini Varadhan 	else
55776fa01ccdSSowmini Varadhan 		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
55786fa01ccdSSowmini Varadhan }
55796fa01ccdSSowmini Varadhan 
55806fa01ccdSSowmini Varadhan /* Extract to_copy bytes starting at off from skb, and return this in
55816fa01ccdSSowmini Varadhan  * a new skb
55826fa01ccdSSowmini Varadhan  */
55836fa01ccdSSowmini Varadhan struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
55846fa01ccdSSowmini Varadhan 			     int to_copy, gfp_t gfp)
55856fa01ccdSSowmini Varadhan {
55866fa01ccdSSowmini Varadhan 	struct sk_buff  *clone = skb_clone(skb, gfp);
55876fa01ccdSSowmini Varadhan 
55886fa01ccdSSowmini Varadhan 	if (!clone)
55896fa01ccdSSowmini Varadhan 		return NULL;
55906fa01ccdSSowmini Varadhan 
55916fa01ccdSSowmini Varadhan 	if (pskb_carve(clone, off, gfp) < 0 ||
55926fa01ccdSSowmini Varadhan 	    pskb_trim(clone, to_copy)) {
55936fa01ccdSSowmini Varadhan 		kfree_skb(clone);
55946fa01ccdSSowmini Varadhan 		return NULL;
55956fa01ccdSSowmini Varadhan 	}
55966fa01ccdSSowmini Varadhan 	return clone;
55976fa01ccdSSowmini Varadhan }
55986fa01ccdSSowmini Varadhan EXPORT_SYMBOL(pskb_extract);
5599c8c8b127SEric Dumazet 
5600c8c8b127SEric Dumazet /**
5601c8c8b127SEric Dumazet  * skb_condense - try to get rid of fragments/frag_list if possible
5602c8c8b127SEric Dumazet  * @skb: buffer
5603c8c8b127SEric Dumazet  *
5604c8c8b127SEric Dumazet  * Can be used to save memory before skb is added to a busy queue.
5605c8c8b127SEric Dumazet  * If packet has bytes in frags and enough tail room in skb->head,
5606c8c8b127SEric Dumazet  * pull all of them, so that we can free the frags right now and adjust
5607c8c8b127SEric Dumazet  * truesize.
5608c8c8b127SEric Dumazet  * Notes:
5609c8c8b127SEric Dumazet  *	We do not reallocate skb->head thus can not fail.
5610c8c8b127SEric Dumazet  *	Caller must re-evaluate skb->truesize if needed.
5611c8c8b127SEric Dumazet  */
5612c8c8b127SEric Dumazet void skb_condense(struct sk_buff *skb)
5613c8c8b127SEric Dumazet {
56143174fed9SEric Dumazet 	if (skb->data_len) {
56153174fed9SEric Dumazet 		if (skb->data_len > skb->end - skb->tail ||
5616c8c8b127SEric Dumazet 		    skb_cloned(skb))
5617c8c8b127SEric Dumazet 			return;
5618c8c8b127SEric Dumazet 
5619c8c8b127SEric Dumazet 		/* Nice, we can free page frag(s) right now */
5620c8c8b127SEric Dumazet 		__pskb_pull_tail(skb, skb->data_len);
56213174fed9SEric Dumazet 	}
56223174fed9SEric Dumazet 	/* At this point, skb->truesize might be over estimated,
56233174fed9SEric Dumazet 	 * because skb had a fragment, and fragments do not tell
56243174fed9SEric Dumazet 	 * their truesize.
56253174fed9SEric Dumazet 	 * When we pulled its content into skb->head, fragment
56263174fed9SEric Dumazet 	 * was freed, but __pskb_pull_tail() could not possibly
56273174fed9SEric Dumazet 	 * adjust skb->truesize, not knowing the frag truesize.
5628c8c8b127SEric Dumazet 	 */
5629c8c8b127SEric Dumazet 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5630c8c8b127SEric Dumazet }
5631df5042f4SFlorian Westphal 
5632df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
5633df5042f4SFlorian Westphal static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
5634df5042f4SFlorian Westphal {
5635df5042f4SFlorian Westphal 	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
5636df5042f4SFlorian Westphal }
5637df5042f4SFlorian Westphal 
5638df5042f4SFlorian Westphal static struct skb_ext *skb_ext_alloc(void)
5639df5042f4SFlorian Westphal {
5640df5042f4SFlorian Westphal 	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
5641df5042f4SFlorian Westphal 
5642df5042f4SFlorian Westphal 	if (new) {
5643df5042f4SFlorian Westphal 		memset(new->offset, 0, sizeof(new->offset));
5644df5042f4SFlorian Westphal 		refcount_set(&new->refcnt, 1);
5645df5042f4SFlorian Westphal 	}
5646df5042f4SFlorian Westphal 
5647df5042f4SFlorian Westphal 	return new;
5648df5042f4SFlorian Westphal }
5649df5042f4SFlorian Westphal 
56504165079bSFlorian Westphal static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
56514165079bSFlorian Westphal 					 unsigned int old_active)
5652df5042f4SFlorian Westphal {
5653df5042f4SFlorian Westphal 	struct skb_ext *new;
5654df5042f4SFlorian Westphal 
5655df5042f4SFlorian Westphal 	if (refcount_read(&old->refcnt) == 1)
5656df5042f4SFlorian Westphal 		return old;
5657df5042f4SFlorian Westphal 
5658df5042f4SFlorian Westphal 	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
5659df5042f4SFlorian Westphal 	if (!new)
5660df5042f4SFlorian Westphal 		return NULL;
5661df5042f4SFlorian Westphal 
5662df5042f4SFlorian Westphal 	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
5663df5042f4SFlorian Westphal 	refcount_set(&new->refcnt, 1);
5664df5042f4SFlorian Westphal 
56654165079bSFlorian Westphal #ifdef CONFIG_XFRM
56664165079bSFlorian Westphal 	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
56674165079bSFlorian Westphal 		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
56684165079bSFlorian Westphal 		unsigned int i;
56694165079bSFlorian Westphal 
56704165079bSFlorian Westphal 		for (i = 0; i < sp->len; i++)
56714165079bSFlorian Westphal 			xfrm_state_hold(sp->xvec[i]);
56724165079bSFlorian Westphal 	}
56734165079bSFlorian Westphal #endif
5674df5042f4SFlorian Westphal 	__skb_ext_put(old);
5675df5042f4SFlorian Westphal 	return new;
5676df5042f4SFlorian Westphal }
5677df5042f4SFlorian Westphal 
5678df5042f4SFlorian Westphal /**
5679df5042f4SFlorian Westphal  * skb_ext_add - allocate space for given extension, COW if needed
5680df5042f4SFlorian Westphal  * @skb: buffer
5681df5042f4SFlorian Westphal  * @id: extension to allocate space for
5682df5042f4SFlorian Westphal  *
5683df5042f4SFlorian Westphal  * Allocates enough space for the given extension.
5684df5042f4SFlorian Westphal  * If the extension is already present, a pointer to that extension
5685df5042f4SFlorian Westphal  * is returned.
5686df5042f4SFlorian Westphal  *
5687df5042f4SFlorian Westphal  * If the skb was cloned, COW applies and the returned memory can be
5688df5042f4SFlorian Westphal  * modified without changing the extension space of clones buffers.
5689df5042f4SFlorian Westphal  *
5690df5042f4SFlorian Westphal  * Returns pointer to the extension or NULL on allocation failure.
5691df5042f4SFlorian Westphal  */
5692df5042f4SFlorian Westphal void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
5693df5042f4SFlorian Westphal {
5694df5042f4SFlorian Westphal 	struct skb_ext *new, *old = NULL;
5695df5042f4SFlorian Westphal 	unsigned int newlen, newoff;
5696df5042f4SFlorian Westphal 
5697df5042f4SFlorian Westphal 	if (skb->active_extensions) {
5698df5042f4SFlorian Westphal 		old = skb->extensions;
5699df5042f4SFlorian Westphal 
57004165079bSFlorian Westphal 		new = skb_ext_maybe_cow(old, skb->active_extensions);
5701df5042f4SFlorian Westphal 		if (!new)
5702df5042f4SFlorian Westphal 			return NULL;
5703df5042f4SFlorian Westphal 
5704682ec859SPaolo Abeni 		if (__skb_ext_exist(new, id))
5705df5042f4SFlorian Westphal 			goto set_active;
5706df5042f4SFlorian Westphal 
5707e94e50bdSPaolo Abeni 		newoff = new->chunks;
5708df5042f4SFlorian Westphal 	} else {
5709df5042f4SFlorian Westphal 		newoff = SKB_EXT_CHUNKSIZEOF(*new);
5710df5042f4SFlorian Westphal 
5711df5042f4SFlorian Westphal 		new = skb_ext_alloc();
5712df5042f4SFlorian Westphal 		if (!new)
5713df5042f4SFlorian Westphal 			return NULL;
5714df5042f4SFlorian Westphal 	}
5715df5042f4SFlorian Westphal 
5716df5042f4SFlorian Westphal 	newlen = newoff + skb_ext_type_len[id];
5717df5042f4SFlorian Westphal 	new->chunks = newlen;
5718df5042f4SFlorian Westphal 	new->offset[id] = newoff;
5719df5042f4SFlorian Westphal set_active:
5720682ec859SPaolo Abeni 	skb->extensions = new;
5721df5042f4SFlorian Westphal 	skb->active_extensions |= 1 << id;
5722df5042f4SFlorian Westphal 	return skb_ext_get_ptr(new, id);
5723df5042f4SFlorian Westphal }
5724df5042f4SFlorian Westphal EXPORT_SYMBOL(skb_ext_add);
5725df5042f4SFlorian Westphal 
57264165079bSFlorian Westphal #ifdef CONFIG_XFRM
57274165079bSFlorian Westphal static void skb_ext_put_sp(struct sec_path *sp)
57284165079bSFlorian Westphal {
57294165079bSFlorian Westphal 	unsigned int i;
57304165079bSFlorian Westphal 
57314165079bSFlorian Westphal 	for (i = 0; i < sp->len; i++)
57324165079bSFlorian Westphal 		xfrm_state_put(sp->xvec[i]);
57334165079bSFlorian Westphal }
57344165079bSFlorian Westphal #endif
57354165079bSFlorian Westphal 
5736df5042f4SFlorian Westphal void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
5737df5042f4SFlorian Westphal {
5738df5042f4SFlorian Westphal 	struct skb_ext *ext = skb->extensions;
5739df5042f4SFlorian Westphal 
5740df5042f4SFlorian Westphal 	skb->active_extensions &= ~(1 << id);
5741df5042f4SFlorian Westphal 	if (skb->active_extensions == 0) {
5742df5042f4SFlorian Westphal 		skb->extensions = NULL;
5743df5042f4SFlorian Westphal 		__skb_ext_put(ext);
57444165079bSFlorian Westphal #ifdef CONFIG_XFRM
57454165079bSFlorian Westphal 	} else if (id == SKB_EXT_SEC_PATH &&
57464165079bSFlorian Westphal 		   refcount_read(&ext->refcnt) == 1) {
57474165079bSFlorian Westphal 		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
57484165079bSFlorian Westphal 
57494165079bSFlorian Westphal 		skb_ext_put_sp(sp);
57504165079bSFlorian Westphal 		sp->len = 0;
57514165079bSFlorian Westphal #endif
5752df5042f4SFlorian Westphal 	}
5753df5042f4SFlorian Westphal }
5754df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_del);
5755df5042f4SFlorian Westphal 
5756df5042f4SFlorian Westphal void __skb_ext_put(struct skb_ext *ext)
5757df5042f4SFlorian Westphal {
5758df5042f4SFlorian Westphal 	/* If this is last clone, nothing can increment
5759df5042f4SFlorian Westphal 	 * it after check passes.  Avoids one atomic op.
5760df5042f4SFlorian Westphal 	 */
5761df5042f4SFlorian Westphal 	if (refcount_read(&ext->refcnt) == 1)
5762df5042f4SFlorian Westphal 		goto free_now;
5763df5042f4SFlorian Westphal 
5764df5042f4SFlorian Westphal 	if (!refcount_dec_and_test(&ext->refcnt))
5765df5042f4SFlorian Westphal 		return;
5766df5042f4SFlorian Westphal free_now:
57674165079bSFlorian Westphal #ifdef CONFIG_XFRM
57684165079bSFlorian Westphal 	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
57694165079bSFlorian Westphal 		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
57704165079bSFlorian Westphal #endif
57714165079bSFlorian Westphal 
5772df5042f4SFlorian Westphal 	kmem_cache_free(skbuff_ext_cache, ext);
5773df5042f4SFlorian Westphal }
5774df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_put);
5775df5042f4SFlorian Westphal #endif /* CONFIG_SKB_EXTENSIONS */
5776